   34 #include "config_components.h" 
   80 #define QUANT_BIAS_SHIFT 8 
   82 #define QMAT_SHIFT_MMX 16 
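/* The fragments below appear to be from ff_convert_matrix(), which precomputes, for each
 * qscale in [qmin, qmax], the integer multiplier tables (qmat, qmat16) derived from the
 * quantization matrix and the rounding bias used by the DCT quantizers. */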
  108                        uint16_t (*qmat16)[2][64],
  109                        const uint16_t *quant_matrix,
  110                        int bias, int qmin, int qmax, int intra)
  121         else                 qscale2 = qscale << 1;
  128             for (i = 0; i < 64; i++) {
  129                 const int j = s->idsp.idct_permutation[i];
  140             for (i = 0; i < 64; i++) {
  141                 const int j = s->idsp.idct_permutation[i];
  152             for (i = 0; i < 64; i++) {
  153                 const int j = s->idsp.idct_permutation[i];
  174         for (i = intra; i < 64; i++) {
  186                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
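/* The next fragments look like update_qscale() and ff_init_qscale_tab(): the first clips the
 * per-frame qscale to the configured range, the second converts the per-macroblock lambda
 * table into a per-macroblock qscale table. */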
 
  193     if (s->q_scale_type == 1 && 0) {
  195         int bestdiff=INT_MAX;
  203             if (diff < bestdiff) {
  212         s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
  225         for (i = 0; i < 64; i++) {
  237     int8_t * const qscale_table = s->current_picture.qscale_table;
  240     for (i = 0; i < s->mb_num; i++) {
  241         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
  243         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
  251 #define COPY(a) dst->a= src->a 
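/* Fragments of the encoder defaults and of ff_mpv_encode_init() follow: option validation
 * (GOP size, B-frame count, DC precision, bitrate/VBV constraints, per-codec restrictions)
 * and allocation of the encoder-side tables. */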
  267    for (int i = -16; i < 16; i++)
  286     s->input_picture_number  = 0;
  287     s->picture_in_gop_number = 0;
 
  296     if (CONFIG_H263_ENCODER)
  298     if (!s->dct_quantize)
  302     s->fast_dct_quantize = s->dct_quantize;
  303     if (s->avctx->trellis)
  315     int mb_array_size, mv_table_size;
 
  343                "keyframe interval too large!, reducing it from %d to %d\n",
  355                "max b frames must be 0 or positive for mpegvideo based encoders\n");
  366     s->rtp_mode           = !!s->rtp_payload_size;
  370     if (s->intra_dc_precision < 0) {
  371         s->intra_dc_precision += 8;
  372     } else if (s->intra_dc_precision >= 8)
  373         s->intra_dc_precision -= 8;
  375     if (s->intra_dc_precision < 0) {
  377                 "intra dc precision must be positive, note some applications use"
  378                 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
  388     if (s->gop_size <= 1) {
  442                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
  459                "impossible bitrate constraints, this will fail\n");
  469     if (!s->fixed_qscale &&
  475         if (nbt <= INT_MAX) {
  488                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
  489                "specified vbv buffer is too large for the given bitrate!\n");
  501                "OBMC is only supported with simple mb decision\n");
  516                "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
  569     if (s->scenechange_threshold < 1000000000 &&
  572                "closed gop with scene change detection are not supported yet, "
  573                "set threshold to 1000000000\n");
  581                    "low delay forcing is only available for mpeg2, "
  582                    "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
  585         if (s->max_b_frames != 0) {
  587                    "B-frames cannot be used with low delay\n");
  592     if (s->q_scale_type == 1) {
  595                    "non linear quant only supports qmax <= 28 currently\n");
  608                "notice: b_frame_strategy only affects the first pass\n");
  609         s->b_frame_strategy = 0;
  623         s->inter_quant_bias = 0;
  625         s->intra_quant_bias = 0;
  640                "timebase %d/%d not supported by MPEG 4 standard, "
  641                "the maximum admitted value for the timebase denominator "
  649 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
  656         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
 
  660 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
  674         if (!CONFIG_SPEEDHQ_ENCODER)
  682         if (!CONFIG_H261_ENCODER)
  693         if (!CONFIG_H263_ENCODER)
  696                              s->width, s->height) == 8) {
  698                    "The specified picture size of %dx%d is not valid for "
  699                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
  700                    "352x288, 704x576, and 1408x1152. "
  701                    "Try H.263+.\n", s->width, s->height);
  713         s->modified_quant  = s->h263_aic;
  715         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
  725         s->unrestricted_mv = 1;
  739         s->modified_quant  = 1;
  743         s->unrestricted_mv = 0;
  748         s->unrestricted_mv = 1;
  749         s->low_delay       = s->max_b_frames ? 0 : 1;
  750         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
  755         s->unrestricted_mv = 1;
  756         s->msmpeg4_version = 2;
  763         s->unrestricted_mv   = 1;
  764         s->msmpeg4_version   = 3;
  765         s->flipflop_rounding = 1;
  772         s->unrestricted_mv   = 1;
  773         s->msmpeg4_version   = 4;
  774         s->flipflop_rounding = 1;
  781         s->unrestricted_mv   = 1;
  782         s->msmpeg4_version   = 5;
  783         s->flipflop_rounding = 1;
 
  795     s->progressive_frame    =
  800     if (s->lmin > s->lmax) {
  829     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
  837     s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
  838     s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
  839     s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
  840     s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
  841     s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
  842     s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
  845     mb_array_size = s->mb_stride * s->mb_height;
 
  855 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p)))) 
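/* The allocation below appears to set up the B-field motion-vector and field-select tables;
 * each pointer is apparently offset by s->mb_stride + 1 so that accesses one row above and
 * one column to the left of the first macroblock stay inside the allocated area. */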
  860         if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
  861             !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
  865         s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
  866         tmp1 += s->mb_stride + 1;
  868         for (int i = 0; i < 2; i++) {
  869             for (int j = 0; j < 2; j++) {
  870                 for (int k = 0; k < 2; k++) {
  871                     s->b_field_mv_table[i][j][k] = tmp1;
  872                     tmp1 += mv_table_size;
  874                 s->b_field_select_table[i][j] = tmp2;
  875                 tmp2 += 2 * mv_table_size;
  880     if (s->noise_reduction) {
  888         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  889         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  891         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  892         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  894         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  895         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  898     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
  901     if (s->slice_context_count > 1) {
  905             s->h263_slice_structured = 1;
  908     s->quant_precision = 5;
  915     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
  917         if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
  922     for (i = 0; i < 64; i++) {
  923         int j = s->idsp.idct_permutation[i];
  936             s->chroma_intra_matrix[j] =
  960     if (s->b_frame_strategy == 2) {
  961         for (i = 0; i < s->max_b_frames + 2; i++) {
  963             if (!s->tmp_frames[i])
  967             s->tmp_frames[i]->width  = s->width  >> s->brd_scale;
  968             s->tmp_frames[i]->height = s->height >> s->brd_scale;
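/* The av_freep() calls below appear to be from ff_mpv_encode_end(), releasing the
 * motion-vector tables and the (possibly shared) chroma quantization matrices. */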
 
 1006     av_freep(&s->b_bidir_forw_mv_table_base);
 1007     av_freep(&s->b_bidir_back_mv_table_base);
 1010     av_freep(&s->b_field_select_table[0][0]);
 1019     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
 1020     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
 1021     s->q_chroma_intra_matrix=   NULL;
 1022     s->q_chroma_intra_matrix16= NULL;
 
 1037 #define IS_ENCODER 1 
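/* The 6x64 loop that follows looks like the FF_DEBUG_DCT_COEFF debug dump of the quantized
 * block coefficients, printed in idct_permutation order. */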
 1045        for (int i = 0; i < 6; i++) {
 1046            for (int j = 0; j < 64; j++) {
 1048                       block[i][s->idsp.idct_permutation[j]]);
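/* The 16x16 loops below appear to be get_sae() and get_intra_count(), which compare each
 * 16x16 block's deviation from its mean (an intra-cost estimate) with its SAD against the
 * reference to estimate how much of the picture would be better coded as intra. */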
 
 1062     for (y = 0; y < 16; y++) {
 1063         for (x = 0; x < 16; x++) {
 1078     h = s->height & ~15;
 1080     for (y = 0; y < h; y += 16) {
 1081         for (x = 0; x < w; x += 16) {
 1088             acc += sae + 500 < sad;
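/* load_input_picture() fragments appear to follow: the user frame is either referenced
 * directly or copied into an internal Picture, edge-padded, given a pts, and appended to
 * s->input_picture[] behind the B-frame encoding delay. */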
 
 1106     for (int i = 0; pic->f->data[i]; i++) {
 1116                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
 1117                             &s->linesize, &s->uvlinesize);
 1124     int i, display_picture_number = 0, ret;
 1125     int encoding_delay = s->max_b_frames ? s->max_b_frames
 1126                                          : (s->low_delay ? 0 : 1);
 1127     int flush_offset = 1;
 1132         display_picture_number = s->input_picture_number++;
 1136                 int64_t last = s->user_specified_pts;
 1140                            "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
 1145                 if (!s->low_delay && display_picture_number == 1)
 1146                     s->dts_delta = pts - last;
 1148             s->user_specified_pts = pts;
 1151                 s->user_specified_pts =
 1152                 pts = s->user_specified_pts + 1;
 1154                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
 1157                 pts = display_picture_number;
 1161         if (pic_arg->linesize[0] != s->linesize ||
 1162             pic_arg->linesize[1] != s->uvlinesize ||
 1165         if ((s->width & 15) || (s->height & 15))
 1173                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
 1179         pic = &s->picture[i];
 1196             for (int i = 0; i < 3; i++) {
 1197                 ptrdiff_t src_stride = pic_arg->linesize[i];
 1198                 ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
 1199                 int h_shift = i ? s->chroma_x_shift : 0;
 1200                 int v_shift = i ? s->chroma_y_shift : 0;
 1203                 const uint8_t *src = pic_arg->data[i];
 1204                 uint8_t *dst = pic->f->data[i];
 1208                     && !s->progressive_sequence
 1209                     && FFALIGN(s->height, 32) - s->height > 16)
 1212                 if (!s->avctx->rc_buffer_size)
 1215                 if (src_stride == dst_stride)
 1216                     memcpy(dst, src, src_stride * h - src_stride + w);
 1219                     uint8_t *dst2 = dst;
 1221                         memcpy(dst2, src, w);
 1226                 if ((s->width & 15) || (s->height & (vpad-1))) {
 1227                     s->mpvencdsp.draw_edges(dst, dst_stride,
 1242         for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
 1243             if (s->input_picture[flush_offset])
 1246         if (flush_offset <= 1)
 1249             encoding_delay = encoding_delay - flush_offset + 1;
 1254         s->input_picture[i - flush_offset] = s->input_picture[i];
 1256         s->input_picture[i] = NULL;
 1258     s->input_picture[encoding_delay] = pic;
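/* skip_check() appears next: it compares the candidate frame against the reference plane by
 * plane with frame_skip_cmp, combines the 8x8 scores according to frame_skip_exp, and lets
 * the frame be skipped when the score stays below (frame_skip_factor * lambda) >> 8. */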
 
 1269     for (plane = 0; plane < 3; plane++) {
 1271         const int bw = plane ? 1 : 2;
 1272         for (y = 0; y < s->mb_height * bw; y++) {
 1273             for (x = 0; x < s->mb_width * bw; x++) {
 1274                 int off = p->shared ? 0 : 16;
 1275                 const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
 1276                 const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
 1277                 int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
 1279                 switch (FFABS(s->frame_skip_exp)) {
 1280                 case 0: score    =  FFMAX(score, v);          break;
 1281                 case 1: score   += FFABS(v);                  break;
 1282                 case 2: score64 += v * (int64_t)v;                       break;
 1293     if (s->frame_skip_exp < 0)
 1294         score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
 1295                       -1.0/s->frame_skip_exp);
 1299     if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
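/* The block below looks like estimate_best_b_count(): the queued input frames are downscaled
 * by 2^brd_scale into s->tmp_frames[], encoded with a temporary encoder context for each
 * candidate B-frame count, and the count with the lowest rate-distortion score is returned. */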
 
 1328     const int scale = s->brd_scale;
 1333     int best_b_count = -1;
 1348         b_lambda = p_lambda;
 1352     for (i = 0; i < s->max_b_frames + 2; i++) {
 1353         const Picture *pre_input_ptr = i ? s->input_picture[i - 1] :
 1354                                            s->next_picture_ptr;
 1356         if (pre_input_ptr) {
 1357             const uint8_t *data[4];
 1360             if (!pre_input_ptr->shared && i) {
 1366             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
 1367                                        s->tmp_frames[i]->linesize[0],
 1371             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
 1372                                        s->tmp_frames[i]->linesize[1],
 1376             s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
 1377                                        s->tmp_frames[i]->linesize[2],
 1384     for (j = 0; j < s->max_b_frames + 1; j++) {
 1388         if (!s->input_picture[j])
 1401         c->mb_decision  = s->avctx->mb_decision;
 1402         c->me_cmp       = s->avctx->me_cmp;
 1403         c->mb_cmp       = s->avctx->mb_cmp;
 1404         c->me_sub_cmp   = s->avctx->me_sub_cmp;
 1406         c->time_base    = s->avctx->time_base;
 1407         c->max_b_frames = s->max_b_frames;
 1425         for (i = 0; i < s->max_b_frames + 1; i++) {
 1426             int is_p = i % (j + 1) == j || i == s->max_b_frames;
 1428             s->tmp_frames[i + 1]->pict_type = is_p ?
 1430             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
 1449         rd += c->error[0] + c->error[1] + c->error[2];
 1467     return best_b_count;
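/* select_input_picture() fragments appear to follow: depending on b_frame_strategy
 * (0 = fixed count, 1 = SAD-based b_frame_score, 2 = estimate_best_b_count) it decides how
 * many queued frames become B-frames, reorders them into s->reordered_input_picture[], and
 * promotes the chosen frame to s->current_picture_ptr. */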
 
 1475         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
 1479     if (!s->reordered_input_picture[0] && s->input_picture[0]) {
 1480         if (s->frame_skip_threshold || s->frame_skip_factor) {
 1481             if (s->picture_in_gop_number < s->gop_size &&
 1482                 s->next_picture_ptr &&
 1494             !s->next_picture_ptr || s->intra_only) {
 1495             s->reordered_input_picture[0] = s->input_picture[0];
 1497             s->reordered_input_picture[0]->coded_picture_number =
 1498                 s->coded_picture_number++;
 1503                 for (i = 0; i < s->max_b_frames + 1; i++) {
 1504                     int pict_num = s->input_picture[0]->display_picture_number + i;
 1506                     if (pict_num >= s->rc_context.num_entries)
 1508                     if (!s->input_picture[i]) {
 1513                     s->input_picture[i]->f->pict_type =
 1514                         s->rc_context.entry[pict_num].new_pict_type;
 1518             if (s->b_frame_strategy == 0) {
 1519                 b_frames = s->max_b_frames;
 1520                 while (b_frames && !s->input_picture[b_frames])
 1522             } else if (s->b_frame_strategy == 1) {
 1523                 for (i = 1; i < s->max_b_frames + 1; i++) {
 1524                     if (s->input_picture[i] &&
 1525                         s->input_picture[i]->b_frame_score == 0) {
 1526                         s->input_picture[i]->b_frame_score =
 1528                                             s->input_picture[i    ]->f->data[0],
 1529                                             s->input_picture[i - 1]->f->data[0],
 1533                 for (i = 0; i < s->max_b_frames + 1; i++) {
 1534                     if (!s->input_picture[i] ||
 1535                         s->input_picture[i]->b_frame_score - 1 >
 1536                             s->mb_num / s->b_sensitivity)
 1540                 b_frames = FFMAX(0, i - 1);
 1543                 for (i = 0; i < b_frames + 1; i++) {
 1544                     s->input_picture[i]->b_frame_score = 0;
 1546             } else if (s->b_frame_strategy == 2) {
 1556             for (i = b_frames - 1; i >= 0; i--) {
 1557                 int type = s->input_picture[i]->f->pict_type;
 1562                 b_frames == s->max_b_frames) {
 1564                        "warning, too many B-frames in a row\n");
 1567             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
 1569                     s->gop_size > s->picture_in_gop_number) {
 1570                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
 1582             s->reordered_input_picture[0] = s->input_picture[b_frames];
 1585             s->reordered_input_picture[0]->coded_picture_number =
 1586                 s->coded_picture_number++;
 1587             for (i = 0; i < b_frames; i++) {
 1588                 s->reordered_input_picture[i + 1] = s->input_picture[i];
 1589                 s->reordered_input_picture[i + 1]->f->pict_type =
 1591                 s->reordered_input_picture[i + 1]->coded_picture_number =
 1592                     s->coded_picture_number++;
 1599     if (s->reordered_input_picture[0]) {
 1600         s->reordered_input_picture[0]->reference =
 1601            s->reordered_input_picture[0]->f->pict_type !=
 1605                                 s->reordered_input_picture[0]->f)))
 1608         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
 1616             pic = &s->picture[i];
 1618             pic->reference = s->reordered_input_picture[0]->reference;
 1633             s->reordered_input_picture[0]->shared = 0;
 1635             s->current_picture_ptr = pic;
 1638             s->current_picture_ptr = s->reordered_input_picture[0];
 1639             for (i = 0; i < 4; i++) {
 1640                 if (s->new_picture->data[i])
 1644         s->picture_number = s->current_picture_ptr->display_picture_number;
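/* The fragments below appear to cover frame_end(), update_noise_reduction() and
 * frame_start(): edge padding is drawn around the reconstructed picture, the noise-reduction
 * error sums are rescaled when they grow large, and the last/next picture pointers are
 * rotated for the next frame. */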
 
 1655     if (s->unrestricted_mv &&
 1656         s->current_picture.reference &&
 1658         int hshift = s->chroma_x_shift;
 1659         int vshift = s->chroma_y_shift;
 1660         s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
 1661                                 s->current_picture.f->linesize[0],
 1662                                 s->h_edge_pos, s->v_edge_pos,
 1665         s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
 1666                                 s->current_picture.f->linesize[1],
 1667                                 s->h_edge_pos >> hshift,
 1668                                 s->v_edge_pos >> vshift,
 1672         s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
 1673                                 s->current_picture.f->linesize[2],
 1674                                 s->h_edge_pos >> hshift,
 1675                                 s->v_edge_pos >> vshift,
 1683     s->last_pict_type                 = s->pict_type;
 1684     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
 1686         s->last_non_b_pict_type = s->pict_type;
 1693     for (intra = 0; intra < 2; intra++) {
 1694         if (s->dct_count[intra] > (1 << 16)) {
 1695             for (i = 0; i < 64; i++) {
 1696                 s->dct_error_sum[intra][i] >>= 1;
 1698             s->dct_count[intra] >>= 1;
 1701         for (i = 0; i < 64; i++) {
 1702             s->dct_offset[intra][i] = (s->noise_reduction *
 1703                                        s->dct_count[intra] +
 1704                                        s->dct_error_sum[intra][i] / 2) /
 1705                                       (s->dct_error_sum[intra][i] + 1);
 1716         s->last_picture_ptr != s->next_picture_ptr &&
 1717         s->last_picture_ptr->f->buf[0]) {
 1721     s->current_picture_ptr->f->pict_type = s->pict_type;
 1725                                    s->current_picture_ptr)) < 0)
 1729         s->last_picture_ptr = s->next_picture_ptr;
 1730         s->next_picture_ptr = s->current_picture_ptr;
 1733     if (s->last_picture_ptr) {
 1735         if (s->last_picture_ptr->f->buf[0] &&
 1737                                        s->last_picture_ptr)) < 0)
 1740     if (s->next_picture_ptr) {
 1742         if (s->next_picture_ptr->f->buf[0] &&
 1744                                        s->next_picture_ptr)) < 0)
 1748     if (s->dct_error_sum) {
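/* ff_mpv_encode_picture() fragments follow: the packet buffer is sized from the frame
 * dimensions, the slice contexts encode the picture (retrying with a higher lambda when the
 * rate control reports a VBV underflow), stuffing bits and the MPEG-1/2 vbv_delay field are
 * patched in, and the finished packet is returned. */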
 
 1757                           const AVFrame *pic_arg, int *got_packet)
 1760     int i, stuffing_count, ret;
 1761     int context_count = s->slice_context_count;
 1763     s->vbv_ignore_qmax = 0;
 1765     s->picture_in_gop_number++;
 1775     if (s->new_picture->data[0]) {
 1776         int growing_buffer = context_count == 1 && !s->data_partitioning;
 1777         size_t pkt_size = 10000 + s->mb_width * s->mb_height *
 1790                                  s->mb_width*s->mb_height*12);
 1791             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
 1794         for (i = 0; i < context_count; i++) {
 1795             int start_y = s->thread_context[i]->start_mb_y;
 1797             int h       = s->mb_height;
 1804         s->pict_type = s->new_picture->pict_type;
 1811         if (growing_buffer) {
 1821        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
 1831                 s->lambda < s->lmax) {
 1832                 s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
 1833                                        (s->qscale + 1) / s->qscale);
 1834                 if (s->adaptive_quant) {
 1836                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
 1837                         s->lambda_table[i] =
 1838                             FFMAX(s->lambda_table[i] + min_step,
 1839                                   s->lambda_table[i] * (s->qscale + 1) /
 1845                     if (s->flipflop_rounding          ||
 1848                         s->no_rounding ^= 1;
 1851                     s->time_base       = s->last_time_base;
 1852                     s->last_non_b_time = s->time - s->pp_time;
 1854                 for (i = 0; i < context_count; i++) {
 1858                 s->vbv_ignore_qmax = 1;
 1869         for (i = 0; i < 4; i++) {
 1879                                              s->misc_bits + s->i_tex_bits +
 1885         s->stuffing_bits = 8*stuffing_count;
 1886         if (stuffing_count) {
 1892             switch (s->codec_id) {
 1895                 while (stuffing_count--) {
 1902                 stuffing_count -= 4;
 1903                 while (stuffing_count--) {
 1909                 s->stuffing_bits = 0;
 1927             int    minbits = s->frame_bits - 8 *
 1928                              (s->vbv_delay_pos - 1);
 1929             double bits    = s->rc_context.buffer_index + minbits - inbits;
 1930             uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;
 1934                        "Internal error, negative bits\n");
 1946             vbv_delay_ptr[0] &= 0xF8;
 1949             vbv_delay_ptr[2] &= 0x07;
 1958                                           (uint8_t*)props, props_size);
 1964         s->total_bits     += s->frame_bits;
 1966         pkt->pts = s->current_picture.f->pts;
 1969             if (!s->current_picture.coded_picture_number)
 1994         if (!s->picture[i].reference)
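/* Two small helpers appear next: dct_single_coeff_elimination(), which zeroes blocks whose
 * few small coefficients are judged not worth coding (using the weight table below), and
 * clip_coeffs(), which clamps quantized levels to [min_qcoeff, max_qcoeff]. */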
 
 2006                                                 int n, int threshold)
 2008     static const char tab[64] = {
 2009         3, 2, 2, 1, 1, 1, 1, 1,
 2010         1, 1, 1, 1, 1, 1, 1, 1,
 2011         1, 1, 1, 1, 1, 1, 1, 1,
 2012         0, 0, 0, 0, 0, 0, 0, 0,
 2013         0, 0, 0, 0, 0, 0, 0, 0,
 2014         0, 0, 0, 0, 0, 0, 0, 0,
 2015         0, 0, 0, 0, 0, 0, 0, 0,
 2016         0, 0, 0, 0, 0, 0, 0, 0
 2021     int16_t *block = s->block[n];
 2022     const int last_index = s->block_last_index[n];
 2025     if (threshold < 0) {
 2027         threshold = -threshold;
 2032     if (last_index <= skip_dc - 1)
 2035     for (i = 0; i <= last_index; i++) {
 2036         const int j = s->intra_scantable.permutated[i];
 2039             if (skip_dc && i == 0)
 2043         } else if (level > 1) {
 2049     if (score >= threshold)
 2051     for (i = skip_dc; i <= last_index; i++) {
 2052         const int j = s->intra_scantable.permutated[i];
 2056         s->block_last_index[n] = 0;
 2058         s->block_last_index[n] = -1;
 2065     const int maxlevel = s->max_qcoeff;
 2066     const int minlevel = s->min_qcoeff;
 2074     for (; i <= last_index; i++) {
 2075         const int j = s->intra_scantable.permutated[i];
 2078         if (level > maxlevel) {
 2081         } else if (level < minlevel) {
 2091                "warning, clipping %d dct coefficients to %d..%d\n",
 2099     for (y = 0; y < 8; y++) {
 2100         for (x = 0; x < 8; x++) {
 2106             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
 2107                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
 2108                     int v = ptr[x2 + y2 * stride];
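/* encode_mb_internal() fragments follow: per-macroblock qscale/lambda are picked up for
 * adaptive quantization, source pixels are fetched (or motion-compensated differences
 * computed), interlaced DCT is chosen when it scores better, each block is transformed and
 * quantized, and nearly empty blocks may be skipped. */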
 
 2120                                                 int motion_x, int motion_y,
 2121                                                 int mb_block_height,
 2130 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
 2131                            (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
 2133     int16_t orig[12][64];
 2134     const int mb_x = s->mb_x;
 2135     const int mb_y = s->mb_y;
 2139     int uv_dct_offset = s->uvlinesize * 8;
 2140     const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
 2141     ptrdiff_t wrap_y, wrap_c;
 2143     for (i = 0; i < mb_block_count; i++)
 2144         skip_dct[i] = s->skipdct;
 2146     if (s->adaptive_quant) {
 2147         const int last_qp = s->qscale;
 2148         const int mb_xy = mb_x + mb_y * s->mb_stride;
 2150         s->lambda = s->lambda_table[mb_xy];
 2154             s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
 2155             s->dquant = s->qscale - last_qp;
 2176     wrap_y = s->linesize;
 2177     wrap_c = s->uvlinesize;
 2178     ptr_y  = s->new_picture->data[0] +
 2180     ptr_cb = s->new_picture->data[1] +
 2181              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 2182     ptr_cr = s->new_picture->data[2] +
 2183              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 2186         uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
 2189         s->vdsp.emulated_edge_mc(ebuf, ptr_y,
 2192                                  s->width, s->height);
 2194         s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
 2196                                  mb_block_width, mb_block_height,
 2197                                  mb_x * mb_block_width, mb_y * mb_block_height,
 2199         ptr_cb = ebuf + 16 * wrap_y;
 2200         s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
 2202                                  mb_block_width, mb_block_height,
 2203                                  mb_x * mb_block_width, mb_y * mb_block_height,
 2205         ptr_cr = ebuf + 16 * wrap_y + 16;
 2210             int progressive_score, interlaced_score;
 2212             s->interlaced_dct = 0;
 2213             progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
 2214                                 s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
 2215                                                      NULL, wrap_y, 8) - 400;
 2217             if (progressive_score > 0) {
 2218                 interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
 2219                                                         NULL, wrap_y * 2, 8) +
 2220                                    s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
 2221                                                         NULL, wrap_y * 2, 8);
 2222                 if (progressive_score > interlaced_score) {
 2223                     s->interlaced_dct = 1;
 2226                     uv_dct_offset = wrap_c;
 2235         s->pdsp.get_pixels(s->block[0], ptr_y,                  wrap_y);
 2236         s->pdsp.get_pixels(s->block[1], ptr_y + 8,              wrap_y);
 2237         s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset,     wrap_y);
 2238         s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
 2244             s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
 2245             s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
 2247                 s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
 2248                 s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
 2250                 s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
 2251                 s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
 2252                 s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
 2253                 s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
 2254                 s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
 2255                 s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
 
 2261         uint8_t *dest_y, *dest_cb, *dest_cr;
 2263         dest_y  = s->dest[0];
 2264         dest_cb = s->dest[1];
 2265         dest_cr = s->dest[2];
 2268             op_pix  = s->hdsp.put_pixels_tab;
 2269             op_qpix = s->qdsp.put_qpel_pixels_tab;
 2271             op_pix  = s->hdsp.put_no_rnd_pixels_tab;
 2272             op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
 2277                           s->last_picture.f->data,
 2279             op_pix  = s->hdsp.avg_pixels_tab;
 2280             op_qpix = s->qdsp.avg_qpel_pixels_tab;
 2284                           s->next_picture.f->data,
 2289             int progressive_score, interlaced_score;
 2291             s->interlaced_dct = 0;
 2292             progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
 2293                                 s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
 2298                 progressive_score -= 400;
 2300             if (progressive_score > 0) {
 2301                 interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
 2303                                    s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
 2307                 if (progressive_score > interlaced_score) {
 2308                     s->interlaced_dct = 1;
 2311                     uv_dct_offset = wrap_c;
 2319         s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
 2320         s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
 2323         s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
 2330             s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
 2331             s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
 2333                 s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
 2334                                     dest_cb + uv_dct_offset, wrap_c);
 2335                 s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
 2336                                     dest_cr + uv_dct_offset, wrap_c);
 2340         if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
 2342             if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
 2344             if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
 2347                                wrap_y, 8) < 20 * s->qscale)
 2350                                wrap_y, 8) < 20 * s->qscale)
 2352             if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
 2354             if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
 2357                 if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
 2358                                    dest_cb + uv_dct_offset,
 2359                                    wrap_c, 8) < 20 * s->qscale)
 2361                 if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
 2362                                    dest_cr + uv_dct_offset,
 2363                                    wrap_c, 8) < 20 * s->qscale)
 
 2369     if (s->quantizer_noise_shaping) {
 2390         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
 2396         for (i = 0; i < mb_block_count; i++) {
 2399                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
 2408                 s->block_last_index[i] = -1;
 2410         if (s->quantizer_noise_shaping) {
 2411             for (i = 0; i < mb_block_count; i++) {
 2413                     s->block_last_index[i] =
 2415                                             orig[i], i, s->qscale);
 2420         if (s->luma_elim_threshold && !s->mb_intra)
 2421             for (i = 0; i < 4; i++)
 2423         if (s->chroma_elim_threshold && !s->mb_intra)
 2424             for (i = 4; i < mb_block_count; i++)
 2428             for (i = 0; i < mb_block_count; i++) {
 2429                 if (s->block_last_index[i] == -1)
 2430                     s->coded_score[i] = INT_MAX / 256;
 2436         s->block_last_index[4] =
 2437         s->block_last_index[5] = 0;
 2439         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
 2441             for (i=6; i<12; i++) {
 2442                 s->block_last_index[i] = 0;
 2443                 s->block[i][0] = s->block[4][0];
 2450         for (i = 0; i < mb_block_count; i++) {
 2452             if (s->block_last_index[i] > 0) {
 2453                 for (j = 63; j > 0; j--) {
 2454                     if (s->block[i][s->intra_scantable.permutated[j]])
 2457                 s->block_last_index[i] = j;
 
 2463     switch(s->codec_id){
 2466         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
 2470         if (CONFIG_MPEG4_ENCODER)
 2476         if (CONFIG_MSMPEG4ENC)
 2480         if (CONFIG_WMV2_ENCODER)
 2484         if (CONFIG_H261_ENCODER)
 2492         if (CONFIG_H263_ENCODER)
 2495 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
 2502         if (CONFIG_SPEEDHQ_ENCODER)
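/* copy_context_before_encode()/copy_context_after_encode() and encode_mb_hq() appear below:
 * they snapshot and restore the bitstream/prediction state so a macroblock can be encoded
 * with several candidate modes, keeping the one with the lowest rate-distortion score. */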
 
 2525     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
 2528     d->mb_skip_run= s->mb_skip_run;
 2530         d->last_dc[i] = s->last_dc[i];
 2533     d->mv_bits= s->mv_bits;
 2534     d->i_tex_bits= s->i_tex_bits;
 2535     d->p_tex_bits= s->p_tex_bits;
 2536     d->i_count= s->i_count;
 2537     d->skip_count= s->skip_count;
 2538     d->misc_bits= s->misc_bits;
 2542     d->qscale= s->qscale;
 2543     d->dquant= s->dquant;
 2545     d->esc3_level_length= s->esc3_level_length;
 2553     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
 2554     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
 2557     d->mb_skip_run= s->mb_skip_run;
 2559         d->last_dc[i] = s->last_dc[i];
 2562     d->mv_bits= s->mv_bits;
 2563     d->i_tex_bits= s->i_tex_bits;
 2564     d->p_tex_bits= s->p_tex_bits;
 2565     d->i_count= s->i_count;
 2566     d->skip_count= s->skip_count;
 2567     d->misc_bits= s->misc_bits;
 2569     d->mb_intra= s->mb_intra;
 2570     d->mb_skipped= s->mb_skipped;
 2571     d->mv_type= s->mv_type;
 2572     d->mv_dir= s->mv_dir;
 2574     if(s->data_partitioning){
 2576         d->tex_pb= s->tex_pb;
 2580         d->block_last_index[i]= s->block_last_index[i];
 2581     d->interlaced_dct= s->interlaced_dct;
 2582     d->qscale= s->qscale;
 2584     d->esc3_level_length= s->esc3_level_length;
 2589                            int *dmin, int *next_block, int motion_x, int motion_y)
 
 2592     uint8_t *dest_backup[3];
 2596     s->block= s->blocks[*next_block];
 2597     s->pb= pb[*next_block];
 2598     if(s->data_partitioning){
 2599         s->pb2   = pb2   [*next_block];
 2600         s->tex_pb= tex_pb[*next_block];
 2604         memcpy(dest_backup, s->dest, sizeof(s->dest));
 2605         s->dest[0] = s->sc.rd_scratchpad;
 2606         s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
 2607         s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
 2614     if(s->data_partitioning){
 2622         score *= s->lambda2;
 2627         memcpy(s->dest, dest_backup, sizeof(s->dest));
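/* sse() and sse_mb() fragments follow: they accumulate the squared error (or the NSSE
 * noise-shaped variant) of the reconstructed macroblock against the source for the luma and
 * both chroma planes. */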
 
 2645     else if(w==8 && h==8)
 2662     int chroma_mb_w = w >> s->chroma_x_shift;
 2663     int chroma_mb_h = h >> s->chroma_y_shift;
 2665     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
 2666     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
 2670         return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
 2671                                s->dest[0], s->linesize, 16) +
 2672                s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
 2673                                s->dest[1], s->uvlinesize, chroma_mb_h) +
 2674                s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
 2675                                s->dest[2], s->uvlinesize, chroma_mb_h);
 2677         return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
 2678                               s->dest[0], s->linesize, 16) +
 2679                s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
 2680                               s->dest[1], s->uvlinesize, chroma_mb_h) +
 2681                s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
 2682                               s->dest[2], s->uvlinesize, chroma_mb_h);
 2685         return  sse(s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
 2686                     s->dest[0], w, h, s->linesize) +
 2687                 sse(s, s->new_picture->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
 2688                     s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
 2689                 sse(s, s->new_picture->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
 2690                     s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
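/* The next fragments look like the slice worker helpers pre_estimate_motion_thread(),
 * estimate_motion_thread() and mb_var_thread(), which run motion estimation and the
 * per-macroblock variance/mean computation over each slice context, followed by
 * write_mb_info()/update_mb_info() and the put-bit buffer reallocation used for RTP
 * packetization. */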
 
 2698     s->me.dia_size= s->avctx->pre_dia_size;
 2699     s->first_slice_line=1;
 2700     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
 2701         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
 2704         s->first_slice_line=0;
 2715     s->me.dia_size= s->avctx->dia_size;
 2716     s->first_slice_line=1;
 2717     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
 2720         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
 2721             s->block_index[0]+=2;
 2722             s->block_index[1]+=2;
 2723             s->block_index[2]+=2;
 2724             s->block_index[3]+=2;
 2732         s->first_slice_line=0;
 2745             const uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
 2747             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
 2749             varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
 2750                     (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
 2752             s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
 2753             s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
 2754             s->me.mb_var_sum_temp    += varc;
 
 2762         if(s->partitioned_frame){
 2767     } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
 2770     } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
 2782     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
 2784     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
 2785     int gobn = s->mb_y / s->gob_index;
 2787     if (CONFIG_H263_ENCODER)
 2789     bytestream_put_le32(&ptr, offset);
 2790     bytestream_put_byte(&ptr, s->qscale);
 2791     bytestream_put_byte(&ptr, gobn);
 2792     bytestream_put_le16(&ptr, mba);
 2793     bytestream_put_byte(&ptr, pred_x);
 2794     bytestream_put_byte(&ptr, pred_y);
 2796     bytestream_put_byte(&ptr, 0);
 2797     bytestream_put_byte(&ptr, 0);
 2805         s->mb_info_size += 12;
 2806         s->prev_mb_info = s->last_mb_info;
 2818     if (!s->mb_info_size)
 2819         s->mb_info_size += 12;
 2826         && s->slice_context_count == 1
 2827         && s->pb.buf == s->avctx->internal->byte_buffer) {
 2828         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
 2830         uint8_t *new_buffer = NULL;
 2831         int new_buffer_size = 0;
 2833         if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
 2841                               s->avctx->internal->byte_buffer_size + size_increase);
 2845         memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
 2846         av_free(s->avctx->internal->byte_buffer);
 2847         s->avctx->internal->byte_buffer      = new_buffer;
 2848         s->avctx->internal->byte_buffer_size = new_buffer_size;
 2850         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
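/* encode_thread() appears next: it walks the macroblocks of one slice, inserts resync
 * markers/GOB headers when the RTP payload size is reached, tries the candidate inter/intra
 * modes (via encode_mb_hq() when rate-distortion mb_decision is enabled), and accumulates
 * the per-plane encoding error. */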
 
 2860     int chr_h= 16>>s->chroma_y_shift;
 2885         s->last_dc[i] = 128 << s->intra_dc_precision;
 2887         s->encoding_error[i] = 0;
 2890         s->last_dc[0] = 128*8/13;
 2891         s->last_dc[1] = 128*8/14;
 2892         s->last_dc[2] = 128*8/14;
 2895     memset(s->last_mv, 0, sizeof(s->last_mv));
 2899     switch(s->codec_id){
 2903         if (CONFIG_H263_ENCODER)
 2907         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
 2914     s->first_slice_line = 1;
 2915     s->ptr_lastgob = s->pb.buf;
 2916     for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
 2920             if (first_in_slice && mb_y_order != s->start_mb_y)
 2922             s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
 2938             int size_increase =  s->avctx->internal->byte_buffer_size/4
 2946             if(s->data_partitioning){
 2960                 xy= s->mb_y*s->mb_stride + s->mb_x;
 2966                 int current_packet_size, is_gob_start;
 2969                                       - (s->ptr_lastgob - s->pb.buf);
 2971                 is_gob_start = s->rtp_payload_size &&
 2972                                current_packet_size >= s->rtp_payload_size &&
 2975                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
 2977                 switch(s->codec_id){
 2980                     if(!s->h263_slice_structured)
 2981                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
 2984                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
 2986                     if(s->mb_skip_run) is_gob_start=0;
 2989                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
 3005                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
 3007                         int d = 100 / s->error_rate;
 3009                             current_packet_size=0;
 3010                             s->pb.buf_ptr= s->ptr_lastgob;
 3015                     switch(s->codec_id){
 3017                         if (CONFIG_MPEG4_ENCODER) {
 3024                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
 3031                         if (CONFIG_H263_ENCODER) {
 3040                         s->misc_bits+= bits - s->last_bits;
 3044                     s->ptr_lastgob += current_packet_size;
 3045                     s->first_slice_line=1;
 3046                     s->resync_mb_x=mb_x;
 3047                     s->resync_mb_y=mb_y;
 3051             if(  (s->resync_mb_x   == s->mb_x)
 3052                && s->resync_mb_y+1 == s->mb_y){
 3053                 s->first_slice_line=0;
 3063                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
 3069                 if(s->data_partitioning){
 3070                     backup_s.pb2= s->pb2;
 3071                     backup_s.tex_pb= s->tex_pb;
 3078                     s->mv[0][0][0] = s->p_mv_table[xy][0];
 3079                     s->mv[0][0][1] = s->p_mv_table[xy][1];
 3081                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
 3088                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
 3089                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
 3090                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
 3093                                  &dmin, &next_block, 0, 0);
 3102                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
 3109                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
 3110                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
 3113                                  &dmin, &next_block, 0, 0);
 
 3119                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
 3120                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
 3122                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
 3128                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
 3129                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
 3131                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
 3137                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
 3138                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
 3139                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
 3140                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
 3142                                  &dmin, &next_block, 0, 0);
 3149                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
 3150                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
 3151                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
 3154                                  &dmin, &next_block, 0, 0);
 3161                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
 3162                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
 3163                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
 3166                                  &dmin, &next_block, 0, 0);
 3172                     for(dir=0; dir<2; dir++){
 3174                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
 3175                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
 3176                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
 3180                                  &dmin, &next_block, 0, 0);
 3189                                  &dmin, &next_block, 0, 0);
 3190                     if(s->h263_pred || s->h263_aic){
 3192                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
 3200                         const int last_qp= backup_s.qscale;
 3204                         static const int dquant_tab[4]={-1,1,-2,2};
 3205                         int storecoefs = s->mb_intra && s->dc_val[0];
 3213                         s->mv[0][0][0] = best_s.mv[0][0][0];
 3214                         s->mv[0][0][1] = best_s.mv[0][0][1];
 3215                         s->mv[1][0][0] = best_s.mv[1][0][0];
 3216                         s->mv[1][0][1] = best_s.mv[1][0][1];
 3219                         for(; qpi<4; qpi++){
 3220                             int dquant= dquant_tab[qpi];
 3222                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
 3227                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
 3228                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
 3233                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
 3237                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
 3238                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
 3246                     int mx= s->b_direct_mv_table[xy][0];
 3247                     int my= s->b_direct_mv_table[xy][1];
 3249                     backup_s.dquant = 0;
 3254                                  &dmin, &next_block, mx, my);
 3257                     backup_s.dquant = 0;
 3262                                  &dmin, &next_block, 0, 0);
 3267                         coded |= s->block_last_index[i];
 3270                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
 3292                                         &dmin, &next_block, mx, my);
 3297                 s->current_picture.qscale_table[xy] = best_s.qscale;
 3303                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
 3306                 if(s->data_partitioning){
 3309                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
 3310                     s->pb2= backup_s.pb2;
 3314                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
 3315                     s->tex_pb= backup_s.tex_pb;
 3319                 if (CONFIG_H263_ENCODER &&
 3324                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad                     , s->linesize  ,16);
 3325                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
 3326                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
 
 3332                 int motion_x = 0, motion_y = 0;
 3340                     motion_x= s->mv[0][0][0] = 0;
 3341                     motion_y= s->mv[0][0][1] = 0;
 3346                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
 3347                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
 3354                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
 3355                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
 3356                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
 3364                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
 3365                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
 3369                     if (CONFIG_MPEG4_ENCODER) {
 3372                         motion_x=s->b_direct_mv_table[xy][0];
 3373                         motion_y=s->b_direct_mv_table[xy][1];
 3378                     if (CONFIG_MPEG4_ENCODER) {
 3387                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
 3388                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
 3389                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
 3390                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
 3395                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
 3396                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
 3401                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
 3402                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
 3409                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
 3410                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
 3411                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
 3419                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
 3420                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
 3421                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
 3428                     for(dir=0; dir<2; dir++){
 3430                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
 3431                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
 3432                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
 3443                 s->last_mv_dir = s->mv_dir;
 3445                 if (CONFIG_H263_ENCODER &&
 3454                 s->p_mv_table[xy][0]=0;
 3455                 s->p_mv_table[xy][1]=0;
 3462                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
 3463                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
 3465                 s->encoding_error[0] += sse(
 3466                     s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
 3467                     s->dest[0], w, h, s->linesize);
 3468                 s->encoding_error[1] += sse(
 3469                     s, s->new_picture->data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
 3470                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
 3471                 s->encoding_error[2] += sse(
 3472                     s, s->new_picture->data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
 3473                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
 3476                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
 3479             ff_dlog(s->avctx, "MB %d %d bits\n",
 3485     if (CONFIG_MSMPEG4ENC && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
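/* The MERGE() macro and the fragments after it look like merge_context_after_encode() and
 * estimate_qp(): slice-context statistics are summed back into the main context, and the
 * frame quality (lambda) is taken from next_lambda or the rate-control module. */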
 
 3493 #define MERGE(field) dst->field += src->field; src->field=0 
 3516         for(
i=0; 
i<64; 
i++){
 
 3529     if (
s->next_lambda){
 
 3530         s->current_picture_ptr->f->quality =
 
 3531         s->current_picture.f->quality = 
s->next_lambda;
 
 3532         if(!dry_run) 
s->next_lambda= 0;
 
 3533     } 
else if (!
s->fixed_qscale) {
 
 3535         s->current_picture_ptr->f->quality =
 
 3536         s->current_picture.f->quality = 
quality;
 
 3537         if (
s->current_picture.f->quality < 0)
 
 3541     if(
s->adaptive_quant){
 
 3542         switch(
s->codec_id){
 
 3544             if (CONFIG_MPEG4_ENCODER)
 
 3550             if (CONFIG_H263_ENCODER)
 
 3557         s->lambda= 
s->lambda_table[0];
 
 3560         s->lambda = 
s->current_picture.f->quality;
 
 3568     s->time = 
s->current_picture_ptr->f->pts * 
s->avctx->time_base.num;
 
 3571         s->pb_time= 
s->pp_time - (
s->last_non_b_time - 
s->time);
 
 3574         s->pp_time= 
s->time - 
s->last_non_b_time;
 
 3575         s->last_non_b_time= 
s->time;
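/* Frame distances: for a non-B picture, s->pp_time becomes the temporal distance to
 * the previous non-B picture; for a B picture, s->pb_time is the distance from that
 * previous reference to the current frame.  Both are derived from the frame pts
 * scaled by time_base.num and are later used, e.g., for scaling direct-mode motion
 * vectors. */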
 
 3584     int context_count = s->slice_context_count;
 3587     s->me.mb_var_sum_temp    =
 3588     s->me.mc_mb_var_sum_temp = 0;
 3592     if (s->out_format == FMT_MPEG1 || (s->h263_pred && !s->msmpeg4_version))
 3597     s->me.scene_change_score=0;
 3602         if(s->msmpeg4_version >= 3) s->no_rounding=1;
 3603         else                        s->no_rounding=0;
 3606             s->no_rounding ^= 1;
 3615             s->lambda= s->last_lambda_for[s->pict_type];
 3617             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
 3622         if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
 3623         if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
 3624         s->q_chroma_intra_matrix   = s->q_intra_matrix;
 3625         s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
 3629     for(i=1; i<context_count; i++){
 3640         s->lambda  = (s->lambda  * s->me_penalty_compensation + 128) >> 8;
 3641         s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
 3652         for(i=0; i<s->mb_stride*s->mb_height; i++)
 3655         if(!s->fixed_qscale){
 3657             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
 3660     for(i=1; i<context_count; i++){
 3663     s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
 3664     s->mb_var_sum    = s->me.   mb_var_sum_temp;
 3667     if (s->me.scene_change_score > s->scenechange_threshold &&
 3670         for(i=0; i<s->mb_stride*s->mb_height; i++)
 3672         if(s->msmpeg4_version >= 3)
 3674         ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
 3675                 s->mb_var_sum, s->mc_mb_var_sum);
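/* Scene change handling: when motion estimation reports a scene_change_score above
 * scenechange_threshold, every macroblock is forced to CANDIDATE_MB_TYPE_INTRA and
 * the picture is encoded as an I-frame instead of a P-frame. */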
 
 3716                 for(dir=0; dir<2; dir++){
 3722                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
 3733     if (s->qscale < 3 && s->max_qcoeff <= 128 &&
 3742         if (s->avctx->intra_matrix) {
 3744             luma_matrix = s->avctx->intra_matrix;
 3746         if (s->avctx->chroma_intra_matrix)
 3747             chroma_matrix = s->avctx->chroma_intra_matrix;
 3751             int j = s->idsp.idct_permutation[i];
 3753             s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
 3756         s->y_dc_scale_table=
 3758         s->chroma_intra_matrix[0] =
 3761                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
 3763                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
 3767             static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
 3768             static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
 3769             for (int i = 1; i < 64; i++) {
 3775             s->y_dc_scale_table = y;
 3776             s->c_dc_scale_table = c;
 3777             s->intra_matrix[0] = 13;
 3778             s->chroma_intra_matrix[0] = 14;
 3780                               s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
 3782                               s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
 3795     s->current_picture_ptr->f->pict_type =
 3796     s->current_picture.f->pict_type = s->pict_type;
 3799         s->picture_in_gop_number=0;
 3801     s->mb_x = s->mb_y = 0;
 3803     switch(s->out_format) {
 3804 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
 3810         if (CONFIG_SPEEDHQ_ENCODER)
 3814         if (CONFIG_H261_ENCODER)
 3820         else if (CONFIG_MSMPEG4ENC && s->msmpeg4_version)
 3822         else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
 3835         else if (CONFIG_H263_ENCODER)
 3839         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
 3846     s->header_bits= bits - s->last_bits;
 3848     for(i=1; i<context_count; i++){
 3851     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
 3852     for(i=1; i<context_count; i++){
 3853         if (s->pb.buf_end == s->thread_context[i]->pb.buf)
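/* Slice threading: encode_thread() runs once per slice context through
 * avctx->execute().  Afterwards each thread's bit buffer and statistics are folded
 * back into the main context; when a thread's PutBitContext starts exactly where
 * the main one ends, the main buffer is simply extended over it. */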
 
 3862     const int intra= s->mb_intra;
 3865     s->dct_count[intra]++;
 3867     for(i=0; i<64; i++){
 3872                 s->dct_error_sum[intra][i] += level;
 3873                 level -= s->dct_offset[intra][i];
 3876                 s->dct_error_sum[intra][i] -= level;
 3877                 level += s->dct_offset[intra][i];
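/* Noise reduction: for every DCT coefficient the magnitude is accumulated in
 * dct_error_sum[intra][i] and the adaptive threshold dct_offset[intra][i] is
 * subtracted, clamping the coefficient towards zero.  The offsets themselves are
 * recomputed from these statistics in update_noise_reduction(). */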
 
 3886                                   int16_t *block, int n,
 3890     const uint8_t *scantable;
 3891     const uint8_t *perm_scantable;
 3893     unsigned int threshold1, threshold2;
 3905     int coeff_count[64];
 3906     int qmul, qadd, start_i, last_non_zero, i, dc;
 3907     const int esc_length= s->ac_esc_length;
 3909     uint8_t * last_length;
 3915     if(s->dct_error_sum)
 3921     else                 mpeg2_qscale = qscale << 1;
 3925         scantable= s->intra_scantable.scantable;
 3926         perm_scantable= s->intra_scantable.permutated;
 3943         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
 3944         matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
 3948         if (n > 3 && s->intra_chroma_ac_vlc_length) {
 3949             length     = s->intra_chroma_ac_vlc_length;
 3950             last_length= s->intra_chroma_ac_vlc_last_length;
 3952             length     = s->intra_ac_vlc_length;
 3953             last_length= s->intra_ac_vlc_last_length;
 3956         scantable= s->inter_scantable.scantable;
 3957         perm_scantable= s->inter_scantable.permutated;
 3960         qmat = s->q_inter_matrix[qscale];
 3962         length     = s->inter_ac_vlc_length;
 3963         last_length= s->inter_ac_vlc_last_length;
 3968     threshold2= (threshold1<<1);
 3970     for(i=63; i>=start_i; i--) {
 3971         const int j = scantable[i];
 3974         if(((unsigned)(level+threshold1))>threshold2){
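/* The unsigned comparison above is a branch-free range check: with
 * threshold2 == 2*threshold1, (unsigned)(level + threshold1) > threshold2 is
 * equivalent to FFABS(level) > threshold1, i.e. the scaled coefficient is large
 * enough to quantize to a nonzero level. */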
 
 3980     for(i=start_i; i<=last_non_zero; i++) {
 3981         const int j = scantable[i];
 3986         if(((unsigned)(level+threshold1))>threshold2){
 4009     if(last_non_zero < start_i){
 4010         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
 4011         return last_non_zero;
 4014     score_tab[start_i]= 0;
 4015     survivor[0]= start_i;
 4018     for(i=start_i; i<=last_non_zero; i++){
 4019         int level_index, j, zero_distortion;
 4021         int best_score=256*256*256*120;
 4025         zero_distortion= dct_coeff*dct_coeff;
 4027         for(level_index=0; level_index < coeff_count[i]; level_index++){
 4036                 unquant_coeff= alevel*qmul + qadd;
 4038                 j = s->idsp.idct_permutation[scantable[i]];
 4039                 unquant_coeff = alevel * matrix[j] * 8;
 4041                 j = s->idsp.idct_permutation[scantable[i]];
 4043                         unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
 4044                         unquant_coeff =   (unquant_coeff - 1) | 1;
 4046                         unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
 4047                         unquant_coeff =   (unquant_coeff - 1) | 1;
 4052             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
 4054             if((level&(~127)) == 0){
 4055                 for(j=survivor_count-1; j>=0; j--){
 4056                     int run= i - survivor[j];
 4058                     score += score_tab[i-run];
 4060                     if(score < best_score){
 4063                         level_tab[i+1]= level-64;
 4068                     for(j=survivor_count-1; j>=0; j--){
 4069                         int run= i - survivor[j];
 4071                         score += score_tab[i-run];
 4072                         if(score < last_score){
 4075                             last_level= level-64;
 4081                 distortion += esc_length*lambda;
 4082                 for(j=survivor_count-1; j>=0; j--){
 4083                     int run= i - survivor[j];
 4084                     int score= distortion + score_tab[i-run];
 4086                     if(score < best_score){
 4089                         level_tab[i+1]= level-64;
 4094                   for(j=survivor_count-1; j>=0; j--){
 4095                         int run= i - survivor[j];
 4096                         int score= distortion + score_tab[i-run];
 4097                         if(score < last_score){
 4100                             last_level= level-64;
 4108         score_tab[i+1]= best_score;
 4111         if(last_non_zero <= 27){
 4112             for(; survivor_count; survivor_count--){
 4113                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
 4117             for(; survivor_count; survivor_count--){
 4118                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
 4123         survivor[ survivor_count++ ]= i+1;
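/* Trellis quantization: score_tab[i+1] holds the cheapest rate-distortion cost of
 * coding the coefficients up to scan position i with a nonzero level at i,
 *
 *     score_tab[i+1] = min over surviving j of
 *                      score_tab[j] + distortion(level_i) + lambda * bits(run = i - j, level_i)
 *
 * run_tab[]/level_tab[] remember the best predecessor and level, and survivor[]
 * prunes start positions that can no longer beat the current best. */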
 
 4127         last_score= 256*256*256*120;
 4128         for(i= survivor[0]; i<=last_non_zero + 1; i++){
 4129             int score= score_tab[i];
 4133             if(score < last_score){
 4136                 last_level= level_tab[i];
 4137                 last_run= run_tab[i];
 4142     s->coded_score[n] = last_score;
 4145     last_non_zero= last_i - 1;
 4146     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
 4148     if(last_non_zero < start_i)
 4149         return last_non_zero;
 4151     if(last_non_zero == 0 && start_i == 0){
 4153         int best_score= dc * dc;
 4155         for(i=0; i<coeff_count[0]; i++){
 4158             int unquant_coeff, score, distortion;
 4161                     unquant_coeff= (alevel*qmul + qadd)>>3;
 4163                     unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
 4164                     unquant_coeff =   (unquant_coeff - 1) | 1;
 4166             unquant_coeff = (unquant_coeff + 4) >> 3;
 4167             unquant_coeff<<= 3 + 3;
 4169             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
 4172             else                    score= distortion + esc_length*lambda;
 4174             if(score < best_score){
 4176                 best_level= level - 64;
 4179         block[0]= best_level;
 4180         s->coded_score[n] = best_score - dc*dc;
 4181         if(best_level == 0) return -1;
 4182         else                return last_non_zero;
 4188     block[ perm_scantable[last_non_zero] ]= last_level;
 4191     for(; i>start_i; i -= run_tab[i] + 1){
 4192         block[ perm_scantable[i-1] ]= level_tab[i];
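/* Backtracking: starting from the best last nonzero position, run_tab[i] gives the
 * distance to the previously coded coefficient and level_tab[i] its chosen level
 * (the +64 bias used while enumerating candidates has already been removed), so the
 * optimal run/level chain is written back into block[] via perm_scantable[]. */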
 
 4195     return last_non_zero;
 
 4210                     if(i==0) s*= sqrt(0.5);
 4211                     if(j==0) s*= sqrt(0.5);
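/* The two sqrt(0.5) factors above apply the 1/sqrt(2) normalization of the DC row
 * and column of an orthonormal 8x8 DCT-II basis.  A minimal sketch of such a basis
 * table, with a placeholder fixed-point shift and array name (not the in-tree
 * definitions) and assuming <math.h>:
 *
 *     #define BASIS_SHIFT_SKETCH 16
 *     static int16_t basis_sketch[64][64];
 *
 *     static void build_basis_sketch(void)
 *     {
 *         for (int i = 0; i < 8; i++)
 *             for (int j = 0; j < 8; j++)
 *                 for (int y = 0; y < 8; y++)
 *                     for (int x = 0; x < 8; x++) {
 *                         double s = 0.25 * (1 << BASIS_SHIFT_SKETCH);
 *                         if (i == 0) s *= sqrt(0.5);
 *                         if (j == 0) s *= sqrt(0.5);
 *                         basis_sketch[8 * i + j][8 * y + x] =
 *                             lrint(s * cos(M_PI / 8.0 * i * (x + 0.5))
 *                                     * cos(M_PI / 8.0 * j * (y + 0.5)));
 *                     }
 *     }
 */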
 
 4224     const uint8_t *scantable;
 4225     const uint8_t *perm_scantable;
 4231     int qmul, qadd, start_i, last_non_zero, i, dc;
 4233     uint8_t * last_length;
 4235     int rle_index, run, q = 1, sum;
 4237     if(basis[0][0] == 0)
 4243         scantable= s->intra_scantable.scantable;
 4244         perm_scantable= s->intra_scantable.permutated;
 4262         if (n > 3 && s->intra_chroma_ac_vlc_length) {
 4263             length     = s->intra_chroma_ac_vlc_length;
 4264             last_length= s->intra_chroma_ac_vlc_last_length;
 4266             length     = s->intra_ac_vlc_length;
 4267             last_length= s->intra_ac_vlc_last_length;
 4270         scantable= s->inter_scantable.scantable;
 4271         perm_scantable= s->inter_scantable.permutated;
 4274         length     = s->inter_ac_vlc_length;
 4275         last_length= s->inter_ac_vlc_last_length;
 4277     last_non_zero = s->block_last_index[n];
 4280     for(i=0; i<64; i++){
 4285     for(i=0; i<64; i++){
 4291         w= 15 + (48*qns*one + w/2)/w;
 4304     for(i=start_i; i<=last_non_zero; i++){
 4305         int j= perm_scantable[i];
 4312             run_tab[rle_index++]=run;
 4322         int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
 4325         int run2, best_unquant_change=0, analyze_gradient;
 4326         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
 4328         if(analyze_gradient){
 4329             for(i=0; i<64; i++){
 4339             int change, old_coeff;
 4345             for(change=-1; change<=1; change+=2){
 4346                 int new_level= level + change;
 4347                 int score, new_coeff;
 4349                 new_coeff= q*new_level;
 4350                 if(new_coeff >= 2048 || new_coeff < 0)
 4353                 score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
 4354                                                   new_coeff - old_coeff);
 4355                 if(score<best_score){
 4358                     best_change= change;
 4359                     best_unquant_change= new_coeff - old_coeff;
 4366         run2= run_tab[rle_index++];
 4370         for(i=start_i; i<64; i++){
 4371             int j= perm_scantable[i];
 4373             int change, old_coeff;
 4375             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
 4380                 else        old_coeff= qmul*level + qadd;
 4381                 run2= run_tab[rle_index++];
 4388             for(change=-1; change<=1; change+=2){
 4389                 int new_level= level + change;
 4390                 int score, new_coeff, unquant_change;
 4397                     if(new_level<0) new_coeff= qmul*new_level - qadd;
 4398                     else            new_coeff= qmul*new_level + qadd;
 4399                     if(new_coeff >= 2048 || new_coeff <= -2048)
 4404                         if(level < 63 && level > -63){
 4405                             if(i < last_non_zero)
 4415                         if(analyze_gradient){
 4416                             int g= d1[ scantable[i] ];
 4417                             if(g && (g^new_level) >= 0)
 4421                         if(i < last_non_zero){
 4422                             int next_i= i + run2 + 1;
 4423                             int next_level= block[ perm_scantable[next_i] ] + 64;
 4425                             if(next_level&(~127))
 4428                             if(next_i < last_non_zero)
 4448                     if(i < last_non_zero){
 4449                         int next_i= i + run2 + 1;
 4450                         int next_level= block[ perm_scantable[next_i] ] + 64;
 4452                         if(next_level&(~127))
 4455                         if(next_i < last_non_zero)
 4474                 unquant_change= new_coeff - old_coeff;
 4477                 score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
 4479                 if(score<best_score){
 4482                     best_change= change;
 4483                     best_unquant_change= unquant_change;
 4487                 prev_level= level + 64;
 4488                 if(prev_level&(~127))
 4498             int j= perm_scantable[ best_coeff ];
 4500             block[j] += best_change;
 4502             if(best_coeff > last_non_zero){
 4503                 last_non_zero= best_coeff;
 4506                 for(; last_non_zero>=start_i; last_non_zero--){
 4507                     if(block[perm_scantable[last_non_zero]])
 4514             for(i=start_i; i<=last_non_zero; i++){
 4515                 int j= perm_scantable[i];
 4519                      run_tab[rle_index++]=run;
 4526             s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
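/* Quantizer noise shaping: the loop above performs a greedy coordinate descent.
 * For each coefficient it tries changing the quantized level by +-1, estimates the
 * combined rate/distortion effect on the spatial-domain residual with
 * s->mpvencdsp.try_8x8basis(), and if the best single change improves the score it
 * is applied to block[] and folded into the residual with add_8x8basis() before the
 * search repeats. */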
 
 4532     return last_non_zero;
 
 4547                       const uint8_t *scantable, int last)
 4558     for (i = 0; i <= last; i++) {
 4559         const int j = scantable[i];
 4564     for (i = 0; i <= last; i++) {
 4565         const int j = scantable[i];
 4566         const int perm_j = permutation[j];
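/* ff_block_permute(): the first loop saves the coded coefficients in scan order and
 * clears them from the block, the second writes them back at the positions given by
 * the IDCT permutation, so the block matches the coefficient layout expected by the
 * selected (possibly SIMD) IDCT. */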
 
 4572                         int16_t *block, int n,
 4575     int i, j, level, last_non_zero, q, start_i;
 4577     const uint8_t *scantable;
 4580     unsigned int threshold1, threshold2;
 4584     if(s->dct_error_sum)
 4588         scantable= s->intra_scantable.scantable;
 4603         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
 4606         scantable= s->inter_scantable.scantable;
 4609         qmat = s->q_inter_matrix[qscale];
 4613     threshold2= (threshold1<<1);
 4614     for(i=63;i>=start_i;i--) {
 4618         if(((unsigned)(level+threshold1))>threshold2){
 4625     for(i=start_i; i<=last_non_zero; i++) {
 4631         if(((unsigned)(level+threshold1))>threshold2){
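/* A minimal sketch of the quantization step this loop performs, assuming qmat[] was
 * prepared by ff_convert_matrix() so that qmat[j] approximates
 * (1 << QMAT_SHIFT) / (matrix[j] * qscale); the helper name is illustrative, not
 * the in-tree API:
 *
 *     static inline int quantize_one(int coeff, int qmat_j, int bias)
 *     {
 *         if (coeff >= 0)
 *             return  ((bias + coeff * qmat_j) >> QMAT_SHIFT);
 *         else
 *             return -((bias - coeff * qmat_j) >> QMAT_SHIFT);
 *     }
 */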
 
 4649                       scantable, last_non_zero);
 
 4651     return last_non_zero;
 
  