Go to the documentation of this file.
62 #define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))
90 int block_last_index,
const uint8_t scantable[64])
96 for (j = 1; j <= block_last_index; j++) {
97 const int index = scantable[j];
101 if ((
level & (~127)) == 0) {
102 if (j < block_last_index)
107 rate +=
s->ac_esc_length;
125 const int dir[6],
const uint8_t *st[6],
126 const int zigzag_last_index[6])
129 memcpy(
s->c.block_last_index, zigzag_last_index,
sizeof(
int) * 6);
131 for (n = 0; n < 6; n++) {
132 int16_t *ac_val = &
s->c.ac_val[0][0][0] +
s->c.block_index[n] * 16;
134 st[n] =
s->c.intra_scantable.permutated;
137 for (
i = 1;
i < 8;
i++)
138 block[n][
s->c.idsp.idct_permutation[
i]] = ac_val[
i + 8];
141 for (
i = 1;
i < 8;
i++)
142 block[n][
s->c.idsp.idct_permutation[
i << 3]] = ac_val[
i];
156 const int dir[6],
const uint8_t *st[6],
157 int zigzag_last_index[6])
161 const int8_t *
const qscale_table =
s->c.cur_pic.qscale_table;
163 memcpy(zigzag_last_index,
s->c.block_last_index,
sizeof(
int) * 6);
165 for (n = 0; n < 6; n++) {
166 int16_t *ac_val, *ac_val1;
169 s->c.intra_scantable.permutated);
171 ac_val = &
s->c.ac_val[0][0][0] +
s->c.block_index[n] * 16;
174 const int xy =
s->c.mb_x +
s->c.mb_y *
s->c.mb_stride -
s->c.mb_stride;
176 ac_val -=
s->c.block_wrap[n] * 16;
177 if (
s->c.mb_y == 0 ||
s->c.qscale == qscale_table[xy] || n == 2 || n == 3) {
179 for (
i = 1;
i < 8;
i++) {
180 const int level =
block[n][
s->c.idsp.idct_permutation[
i]];
181 block[n][
s->c.idsp.idct_permutation[
i]] =
level - ac_val[
i + 8];
182 ac_val1[
i] =
block[n][
s->c.idsp.idct_permutation[
i << 3]];
187 for (
i = 1;
i < 8;
i++) {
188 const int level =
block[n][
s->c.idsp.idct_permutation[
i]];
190 ac_val1[
i] =
block[n][
s->c.idsp.idct_permutation[
i << 3]];
194 st[n] =
s->c.permutated_intra_h_scantable;
196 const int xy =
s->c.mb_x - 1 +
s->c.mb_y *
s->c.mb_stride;
199 if (
s->c.mb_x == 0 ||
s->c.qscale == qscale_table[xy] || n == 1 || n == 3) {
201 for (
i = 1;
i < 8;
i++) {
202 const int level =
block[n][
s->c.idsp.idct_permutation[
i << 3]];
203 block[n][
s->c.idsp.idct_permutation[
i << 3]] =
level - ac_val[
i];
205 ac_val1[
i + 8] =
block[n][
s->c.idsp.idct_permutation[
i]];
209 for (
i = 1;
i < 8;
i++) {
210 const int level =
block[n][
s->c.idsp.idct_permutation[
i << 3]];
213 ac_val1[
i + 8] =
block[n][
s->c.idsp.idct_permutation[
i]];
216 st[n] =
s->c.permutated_intra_v_scantable;
219 for (
i = 63;
i > 0;
i--)
222 s->c.block_last_index[n] =
i;
240 int8_t *
const qscale_table =
s->c.cur_pic.qscale_table;
249 for (
int i = 0;
i <
s->c.mb_num;
i++) {
250 int mb_xy =
s->c.mb_index2xy[
i];
251 odd += qscale_table[mb_xy] & 1;
254 if (2 * odd >
s->c.mb_num)
259 for (
int i = 0;
i <
s->c.mb_num;
i++) {
260 int mb_xy =
s->c.mb_index2xy[
i];
261 if ((qscale_table[mb_xy] & 1) != odd)
262 qscale_table[mb_xy]++;
263 if (qscale_table[mb_xy] > 31)
264 qscale_table[mb_xy] = 31;
267 for (
int i = 1;
i <
s->c.mb_num;
i++) {
268 int mb_xy =
s->c.mb_index2xy[
i];
269 if (qscale_table[mb_xy] != qscale_table[
s->c.mb_index2xy[
i - 1]] &&
307 const int16_t *
block,
int n,
int intra_dc,
311 int i, last_non_zero;
312 const uint32_t *bits_tab;
313 const uint8_t *len_tab;
314 const int last_index =
s->c.block_last_index[n];
333 last_non_zero =
i - 1;
334 for (;
i < last_index;
i++) {
337 int run =
i - last_non_zero - 1;
339 if ((
level & (~127)) == 0) {
344 7 + 2 + 1 + 6 + 1 + 12 + 1,
345 (3 << 23) + (3 << 21) + (0 << 20) + (
run << 14) +
346 (1 << 13) + (((
level - 64) & 0xfff) << 1) + 1);
353 int run =
i - last_non_zero - 1;
355 if ((
level & (~127)) == 0) {
360 7 + 2 + 1 + 6 + 1 + 12 + 1,
361 (3 << 23) + (3 << 21) + (1 << 20) + (
run << 14) +
362 (1 << 13) + (((
level - 64) & 0xfff) << 1) + 1);
368 const int16_t *
block,
int n,
369 int intra_dc,
const uint8_t *scan_table)
371 int i, last_non_zero;
372 const uint8_t *len_tab;
373 const int last_index =
s->c.block_last_index[n];
391 last_non_zero =
i - 1;
392 for (;
i < last_index;
i++) {
395 int run =
i - last_non_zero - 1;
397 if ((
level & (~127)) == 0) {
401 len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
408 int run =
i - last_non_zero - 1;
410 if ((
level & (~127)) == 0) {
414 len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
422 const int16_t
block[6][64],
423 const int intra_dc[6],
424 const uint8_t *
const *scan_table,
432 for (
i = 0;
i < 6;
i++)
435 intra_dc[
i], scan_table[
i]));
438 for (
i = 0;
i < 6;
i++)
440 intra_dc[
i], scan_table[
i], dc_pb, ac_pb);
444 for (
i = 0;
i < 6;
i++)
447 s->c.intra_scantable.permutated));
450 for (
i = 0;
i < 6;
i++)
452 s->c.intra_scantable.permutated, dc_pb, ac_pb);
458 int motion_x,
int motion_y,
int mb_type)
466 for (
i = 0;
i < 6;
i++) {
467 if (
s->coded_score[
i] < 0) {
468 score +=
s->coded_score[
i];
475 if ((motion_x | motion_y |
s->dquant | mb_type) == 0)
478 zero_score *= lambda;
479 if (zero_score <= score)
483 for (
i = 0;
i < 6;
i++) {
484 if (
s->c.block_last_index[
i] >= 0 && ((cbp >> (5 -
i)) & 1) == 0) {
485 s->c.block_last_index[
i] = -1;
486 s->c.bdsp.clear_block(
s->c.block[
i]);
490 for (
i = 0;
i < 6;
i++) {
491 if (
s->c.block_last_index[
i] >= 0)
502 int motion_x,
int motion_y)
504 int cbpc, cbpy, pred_x, pred_y;
508 const int interleaved_stats = (
s->c.avctx->flags &
AV_CODEC_FLAG_PASS1) && !
s->c.data_partitioning ? 1 : 0;
510 if (!
s->c.mb_intra) {
515 static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
516 int mb_type = mb_type_table[
s->c.mv_dir];
518 if (
s->c.mb_x == 0) {
519 for (
i = 0;
i < 2;
i++)
520 s->c.last_mv[
i][0][0] =
521 s->c.last_mv[
i][0][1] =
522 s->c.last_mv[
i][1][0] =
523 s->c.last_mv[
i][1][1] = 0;
531 if (
s->c.next_pic.mbskip_table[
s->c.mb_y *
s->c.mb_stride +
s->c.mb_x]) {
535 s->c.mv[1][0][1] = 0;
537 s->c.qscale -=
s->dquant;
545 if ((cbp | motion_x | motion_y | mb_type) == 0) {
551 if (interleaved_stats) {
564 if (cbp && mb_type) {
570 s->c.qscale -=
s->dquant;
572 if (!
s->c.progressive_sequence) {
579 if (interleaved_stats)
590 s->c.mv[0][0][0] -
s->c.last_mv[0][0][0],
591 s->c.mv[0][0][1] -
s->c.last_mv[0][0][1],
593 s->c.last_mv[0][0][0] =
594 s->c.last_mv[0][1][0] =
s->c.mv[0][0][0];
595 s->c.last_mv[0][0][1] =
596 s->c.last_mv[0][1][1] =
s->c.mv[0][0][1];
600 s->c.mv[1][0][0] -
s->c.last_mv[1][0][0],
601 s->c.mv[1][0][1] -
s->c.last_mv[1][0][1],
603 s->c.last_mv[1][0][0] =
604 s->c.last_mv[1][1][0] =
s->c.mv[1][0][0];
605 s->c.last_mv[1][0][1] =
606 s->c.last_mv[1][1][1] =
s->c.mv[1][0][1];
618 for (
i = 0;
i < 2;
i++) {
620 s->c.mv[0][
i][0] -
s->c.last_mv[0][
i][0],
621 s->c.mv[0][
i][1] -
s->c.last_mv[0][
i][1] / 2,
623 s->c.last_mv[0][
i][0] =
s->c.mv[0][
i][0];
624 s->c.last_mv[0][
i][1] =
s->c.mv[0][
i][1] * 2;
628 for (
i = 0;
i < 2;
i++) {
630 s->c.mv[1][
i][0] -
s->c.last_mv[1][
i][0],
631 s->c.mv[1][
i][1] -
s->c.last_mv[1][
i][1] / 2,
633 s->c.last_mv[1][
i][0] =
s->c.mv[1][
i][0];
634 s->c.last_mv[1][
i][1] =
s->c.mv[1][
i][1] * 2;
640 if (interleaved_stats)
645 if (interleaved_stats)
650 if ((cbp | motion_x | motion_y |
s->dquant) == 0 &&
658 const uint8_t *p_pic;
663 offset = x + y *
s->c.linesize;
664 p_pic =
s->new_pic->data[0] +
offset;
668 const uint8_t *b_pic;
679 if (x + 16 >
s->c.width || y + 16 >
s->c.height) {
681 int xe =
FFMIN(16,
s->c.width - x);
682 int ye =
FFMIN(16,
s->c.height - y);
684 for (y1 = 0; y1 < ye; y1++) {
685 for (x1 = 0; x1 < xe; x1++) {
686 diff +=
FFABS(p_pic[x1 + y1 *
s->c.linesize] - b_pic[x1 + y1 *
s->c.linesize]);
691 diff =
s->sad_cmp[0](
NULL, p_pic, b_pic,
s->c.linesize, 16);
693 if (
diff >
s->c.qscale * 70) {
701 if (
s->c.mb_skipped == 1) {
705 if (interleaved_stats) {
729 if (!
s->c.progressive_sequence) {
735 if (interleaved_stats)
761 if (interleaved_stats)
772 s->c.mv[0][0][0] - pred_x,
773 s->c.mv[0][0][1] - pred_y,
776 s->c.mv[0][1][0] - pred_x,
777 s->c.mv[0][1][1] - pred_y,
786 if (!
s->c.progressive_sequence && cbp)
789 if (interleaved_stats)
792 for (
i = 0;
i < 4;
i++) {
797 s->c.cur_pic.motion_val[0][
s->c.block_index[
i]][0] - pred_x,
798 s->c.cur_pic.motion_val[0][
s->c.block_index[
i]][1] - pred_y,
803 if (interleaved_stats)
808 if (interleaved_stats)
815 int zigzag_last_index[6];
816 const uint8_t *scan_table[6];
819 for (
int i = 0;
i < 6;
i++) {
821 int scale =
i < 4 ?
s->c.y_dc_scale :
s->c.c_dc_scale;
831 for (
i = 0;
i < 6;
i++)
832 scan_table[
i] =
s->c.intra_scantable.permutated;
837 for (
i = 0;
i < 6;
i++)
838 if (
s->c.block_last_index[
i] >= 1)
862 if (!
s->c.progressive_sequence)
865 if (interleaved_stats)
870 if (interleaved_stats)
888 put_bits(pbc, length, (1 << (length - 1)) - 1);
897 s->c.last_time_base =
s->c.time_base;
898 s->c.time_base =
FFUDIV(
s->c.time,
s->c.avctx->time_base.den);
905 int64_t hours, minutes, seconds;
910 time =
s->c.cur_pic.ptr->f->pts;
913 time = time *
s->c.avctx->time_base.num;
914 s->c.last_time_base =
FFUDIV(time,
s->c.avctx->time_base.den);
916 seconds =
FFUDIV(time,
s->c.avctx->time_base.den);
917 minutes =
FFUDIV(seconds, 60); seconds =
FFUMOD(seconds, 60);
918 hours =
FFUDIV(minutes, 60); minutes =
FFUMOD(minutes, 60);
919 hours =
FFUMOD(hours , 24);
935 int profile_and_level_indication;
939 profile_and_level_indication =
s->c.avctx->profile << 4;
941 profile_and_level_indication = 0xF0;
943 profile_and_level_indication = 0x00;
947 profile_and_level_indication |=
s->c.avctx->level;
949 profile_and_level_indication |= 1;
951 if (profile_and_level_indication >> 4 == 0xF)
960 put_bits(&
s->pb, 8, profile_and_level_indication);
980 int vo_ver_id, vo_type, aspect_ratio_info;
1003 av_reduce(&
s->c.avctx->sample_aspect_ratio.num, &
s->c.avctx->sample_aspect_ratio.den,
1004 s->c.avctx->sample_aspect_ratio.num,
s->c.avctx->sample_aspect_ratio.den, 255);
1005 put_bits(&
s->pb, 8,
s->c.avctx->sample_aspect_ratio.num);
1006 put_bits(&
s->pb, 8,
s->c.avctx->sample_aspect_ratio.den);
1017 put_bits(&
s->pb, 16,
s->c.avctx->time_base.den);
1027 put_bits(&
s->pb, 1,
s->c.progressive_sequence ? 0 : 1);
1037 if (
s->c.mpeg_quant) {
1046 put_bits(&
s->pb, 1,
s->c.data_partitioning ? 1 : 0);
1047 if (
s->c.data_partitioning)
1050 if (vo_ver_id != 1) {
1088 time_div =
FFUDIV(
s->c.time,
s->c.avctx->time_base.den);
1089 time_mod =
FFUMOD(
s->c.time,
s->c.avctx->time_base.den);
1090 time_incr = time_div -
s->c.last_time_base;
1093 if (time_incr > 3600*24) {
1110 if (!
s->c.progressive_sequence) {
1128 int level, uni_code, uni_len;
1184 int slevel,
run, last;
1189 for (slevel = -64; slevel < 64; slevel++) {
1193 for (last = 0; last <= 1; last++) {
1195 int level = slevel < 0 ? -slevel : slevel;
1196 int sign = slevel < 0 ? 1 : 0;
1200 len_tab[
index] = 100;
1262 bits =
bits * 4096 + (slevel & 0xfff);
1285 for (
int f_code =
MAX_FCODE; f_code > 0; f_code--) {
1286 for (
int mv = -(16 << f_code);
mv < (16 << f_code);
mv++)
1299 if (avctx->
width >= (1<<13) || avctx->
height >= (1<<13)) {
1309 s->min_qcoeff = -2048;
1310 s->max_qcoeff = 2047;
1316 s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1;
1328 "timebase %d/%d not supported by MPEG 4 standard, "
1329 "the maximum admitted value for the timebase denominator "
1356 uint8_t *end =
s->pb.buf_end;
1357 int size = end - start;
1358 int pb_size = (((intptr_t)start +
size / 3) & (~3)) - (intptr_t)start;
1359 int tex_size = (
size - 2 * pb_size) & (~3);
1374 s->misc_bits += 19 + pb2_len +
bits -
s->last_bits;
1375 s->i_tex_bits += tex_pb_len;
1378 s->misc_bits += 17 + pb2_len;
1379 s->mv_bits +=
bits -
s->last_bits;
1380 s->p_tex_bits += tex_pb_len;
1394 int mb_num_bits =
av_log2(
s->c.mb_num - 1) + 1;
1399 put_bits(&
s->pb, mb_num_bits,
s->c.mb_x +
s->c.mb_y *
s->c.mb_width);
1404 #define OFFSET(x) offsetof(MPVEncContext, c.x)
1405 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
1407 {
"data_partitioning",
"Use data partitioning.",
OFFSET(data_partitioning),
AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1,
VE },
1408 {
"alternate_scan",
"Enable alternate scantable.",
OFFSET(alternate_scan),
AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1,
VE },
1409 {
"mpeg_quant",
"Use MPEG quantizers instead of H.263",
static av_cold void mpeg4_encode_init_static(void)
static int mpeg4_get_dc_length(int level, int n)
const uint8_t * fcode_tab
smallest fcode needed for each MV
#define CODEC_PIXFMTS(...)
#define CANDIDATE_MB_TYPE_BIDIR
#define MV_TYPE_16X16
1 vector for the whole mb
#define FF_ASPECT_EXTENDED
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Filter: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the filter lists the supported formats. For video that means the pixel format; for audio that means the channel layout and sample format. The lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection; and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
static void av_unused put_bits32(PutBitContext *s, uint32_t value)
Write exactly 32 bits into a bitstream.
static int put_bytes_output(const PutBitContext *s)
static const int8_t mv[256][2]
static void mpeg4_encode_gop_header(MPVMainEncContext *const m)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
void ff_clean_h263_qscales(MPVEncContext *s)
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
static void mpeg4_encode_block(const MPVEncContext *const s, const int16_t *block, int n, int intra_dc, const uint8_t *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
Encode an 8x8 block.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static av_cold void init_uni_dc_tab(void)
int time_increment_bits
number of bits to represent the fractional part of time
static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64], int motion_x, int motion_y)
av_cold void ff_mpeg4_init_rl_intra(void)
int(* encode_picture_header)(struct MPVMainEncContext *m)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
static uint32_t uni_mpeg4_intra_rl_bits[64 *64 *2 *2]
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
#define FF_MPV_COMMON_MOTION_EST_OPTS
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
#define FF_MPV_COMMON_OPTS
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
static uint8_t uni_mpeg4_intra_rl_len[64 *64 *2 *2]
const uint8_t ff_mpeg4_DCtab_chrom[13][2]
AVCodec p
The public AVCodec.
static int mpeg4_get_block_length(MPVEncContext *const s, const int16_t *block, int n, int intra_dc, const uint8_t *scan_table)
static Mpeg4EncContext * mainctx_to_mpeg4(MPVMainEncContext *m)
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s)
static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
static uint32_t uni_mpeg4_inter_rl_bits[64 *64 *2 *2]
int flags
AV_CODEC_FLAG_*.
static uint8_t uni_DCtab_chrom_len[512]
#define FF_MPV_FLAG_CBP_RD
#define AV_CODEC_FLAG2_NO_OUTPUT
Skip bitstream encoding.
void ff_mpeg4_init_partitions(MPVEncContext *const s)
#define FF_CODEC_ENCODE_CB(func)
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
static const int dquant_code[5]
#define CANDIDATE_MB_TYPE_DIRECT
int n
number of entries of table_vlc minus 1
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_PROFILE_UNKNOWN
int8_t * max_level[2]
encoding & decoding
static void mpeg4_encode_blocks(MPVEncContext *const s, const int16_t block[6][64], const int intra_dc[6], const uint8_t *const *scan_table, PutBitContext *dc_pb, PutBitContext *ac_pb)
static uint8_t uni_mpeg4_inter_rl_len[64 *64 *2 *2]
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
static int get_rl_index(const RLTable *rl, int last, int run, int level)
static void skip_put_bits(PutBitContext *s, int n)
Skip the given number of bits.
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
RLTable ff_mpeg4_rl_intra
static uint16_t uni_DCtab_chrom_bits[512]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define UNI_MPEG4_ENC_INDEX(last, run, level)
static uint16_t uni_DCtab_lum_bits[512]
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
int max_b_frames
max number of B-frames
void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4, int vo_number, int vol_number)
static av_cold void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab)
#define CODEC_LONG_NAME(str)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior, as their absolute values are not representable in the same type.
const uint8_t ff_mpeg4_DCtab_lum[13][2]
static int get_block_rate(MPVEncContext *const s, int16_t block[64], int block_last_index, const uint8_t scantable[64])
Return the number of bits that encoding the 8x8 block in block would need.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
#define FF_COMPLIANCE_VERY_STRICT
Strictly conform to an older more strict version of the spec or reference software.
const uint16_t(* table_vlc)[2]
static int ff_mpeg4_pred_dc(MpegEncContext *s, int n, int *dir_ptr)
Predict the dc.
#define ROUNDED_DIV(a, b)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
static int get_p_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y)
static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
#define VISUAL_OBJ_STARTCODE
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
void ff_set_mpeg4_time(MPVEncContext *const s)
#define ADV_SIMPLE_VO_TYPE
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
enum AVPictureType pict_type
Picture type of the frame.
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
const uint8_t ff_mpeg4_y_dc_scale_table[32]
const uint8_t ff_h263_cbpy_tab[16][2]
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static uint8_t fcode_tab[MAX_MV *2+1]
Minimal fcode that a motion vector component would need.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field; just let it be.
#define MV_TYPE_FIELD
2 vectors, one per field
const uint8_t ff_h263_inter_MCBPC_bits[28]
#define UNI_AC_ENC_INDEX(run, level)
#define FF_MPEG4_PROFILE_OPTS
static int get_bits_diff(MPVEncContext *s)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
#define i(width, name, range_min, range_max)
Test the status of the outputs and forward it to the corresponding inputs; return FFERROR_NOT_READY where appropriate. If the filter stores one or a few frames internally for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
static int put_bits_count(PutBitContext *s)
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
int8_t * max_run[2]
encoding & decoding
static uint8_t uni_DCtab_lum_len[512]
static void restore_ac_coeffs(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], const int zigzag_last_index[6])
Restore the ac coefficients in block that have been changed by decide_ac_pred().
static const AVClass mpeg4enc_class
static const AVOption options[]
const FFCodec ff_mpeg4_encoder
const char * name
Name of the codec implementation.
av_const int ff_h263_aspect_to_info(AVRational aspect)
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define AV_CODEC_FLAG_CLOSED_GOP
static const float pred[4]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
static av_cold int encode_init(AVCodecContext *avctx)
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
const uint8_t ff_h263_intra_MCBPC_bits[9]
main external API structure.
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
const uint8_t ff_h263_intra_MCBPC_code[9]
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
static void mpeg4_encode_dc(PutBitContext *s, int level, int n)
Encode the dc value.
@ AV_OPT_TYPE_INT
Underlying C type is int.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
#define FF_MPV_COMMON_BFRAME_OPTS
#define USER_DATA_STARTCODE
const uint8_t ff_h263_inter_MCBPC_code[28]
@ AV_PICTURE_TYPE_P
Predicted.
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
const uint8_t ff_mpeg4_c_dc_scale_table[32]
static int decide_ac_pred(MPVEncContext *const s, int16_t block[6][64], const int dir[6], const uint8_t *st[6], int zigzag_last_index[6])
Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
static void scale(int *out, const int *in, const int w, const int h, const int shift)
static const MPVMainEncContext * slice_to_mainenc(const MPVEncContext *s)
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
static void ff_h263_encode_motion_vector(MPVEncContext *s, int x, int y, int f_code)
int width
picture width / height.
The exact code depends on how similar the blocks are and how related they are to the block
static int get_b_cbp(MPVEncContext *const s, int16_t block[6][64], int motion_x, int motion_y, int mb_type)
MPVEncContext s
The main slicecontext.
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.