00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 
00045 
00046 
00047 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
00048                                    DCTELEM *block, int n, int qscale);
00049 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
00050                                    DCTELEM *block, int n, int qscale);
00051 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
00052                                    DCTELEM *block, int n, int qscale);
00053 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
00054                                    DCTELEM *block, int n, int qscale);
00055 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
00056                                    DCTELEM *block, int n, int qscale);
00057 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
00058                                   DCTELEM *block, int n, int qscale);
00059 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
00060                                   DCTELEM *block, int n, int qscale);
00061 
00062 
00063 
00064 
00065 
00066 
00067 
00068 
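/* default chroma qscale table: chroma qscale equals luma qscale (identity mapping) */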
00069 static const uint8_t ff_default_chroma_qscale_table[32] = {
00070 
00071      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
00072     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
00073 };
00074 
00075 const uint8_t ff_mpeg1_dc_scale_table[128] = {
00076 
00077     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00078     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00079     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00080     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00081     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00082     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00083     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00084     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
00085 };
00086 
00087 static const uint8_t mpeg2_dc_scale_table1[128] = {
00088 
00089     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00090     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00091     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00092     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00093     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00094     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00095     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00096     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
00097 };
00098 
00099 static const uint8_t mpeg2_dc_scale_table2[128] = {
00100 
00101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00107     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00108     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
00109 };
00110 
00111 static const uint8_t mpeg2_dc_scale_table3[128] = {
00112 
00113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00119     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00120     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
00121 };
00122 
00123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
00124     ff_mpeg1_dc_scale_table,
00125     mpeg2_dc_scale_table1,
00126     mpeg2_dc_scale_table2,
00127     mpeg2_dc_scale_table3,
00128 };
00129 
00130 const enum PixelFormat ff_pixfmt_list_420[] = {
00131     PIX_FMT_YUV420P,
00132     PIX_FMT_NONE
00133 };
00134 
00135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00136     PIX_FMT_DXVA2_VLD,
00137     PIX_FMT_VAAPI_VLD,
00138     PIX_FMT_VDA_VLD,
00139     PIX_FMT_YUV420P,
00140     PIX_FMT_NONE
00141 };
00142 
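/**
 * Scan for an MPEG start code (0x000001xx). *state holds the last bytes seen
 * so the search can resume across buffer boundaries; the return value points
 * just past the start code, or to end if none was found.
 */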
00143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
00144                                           const uint8_t *end,
00145                                           uint32_t * restrict state)
00146 {
00147     int i;
00148 
00149     assert(p <= end);
00150     if (p >= end)
00151         return end;
00152 
00153     for (i = 0; i < 3; i++) {
00154         uint32_t tmp = *state << 8;
00155         *state = tmp + *(p++);
00156         if (tmp == 0x100 || p == end)
00157             return p;
00158     }
00159 
00160     while (p < end) {
00161         if      (p[-1] > 1      ) p += 3;
00162         else if (p[-2]          ) p += 2;
00163         else if (p[-3]|(p[-1]-1)) p++;
00164         else {
00165             p++;
00166             break;
00167         }
00168     }
00169 
00170     p = FFMIN(p, end) - 4;
00171     *state = AV_RB32(p);
00172 
00173     return p + 4;
00174 }
00175 
00176 
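/**
 * Initialize the DSP context, the dct_unquantize function pointers and the
 * scantables; architecture-specific optimizations are hooked in when available.
 */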
00177 av_cold int ff_dct_common_init(MpegEncContext *s)
00178 {
00179     dsputil_init(&s->dsp, s->avctx);
00180 
00181     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
00182     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
00183     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
00184     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
00185     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
00186     if (s->flags & CODEC_FLAG_BITEXACT)
00187         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
00188     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
00189 
00190 #if HAVE_MMX
00191     MPV_common_init_mmx(s);
00192 #elif ARCH_ALPHA
00193     MPV_common_init_axp(s);
00194 #elif CONFIG_MLIB
00195     MPV_common_init_mlib(s);
00196 #elif HAVE_MMI
00197     MPV_common_init_mmi(s);
00198 #elif ARCH_ARM
00199     MPV_common_init_arm(s);
00200 #elif HAVE_ALTIVEC
00201     MPV_common_init_altivec(s);
00202 #elif ARCH_BFIN
00203     MPV_common_init_bfin(s);
00204 #endif
00205 
00206     
00207 
00208 
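    /* initialize the scantables, permuted to match the IDCT in use */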
00209     if (s->alternate_scan) {
00210         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
00211         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
00212     } else {
00213         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
00214         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
00215     }
00216     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
00217     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
00218 
00219     return 0;
00220 }
00221 
00222 void ff_copy_picture(Picture *dst, Picture *src)
00223 {
00224     *dst = *src;
00225     dst->f.type = FF_BUFFER_TYPE_COPY;
00226 }
00227 
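/**
 * Release a frame buffer.
 */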
00231 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
00232 {
00233     
00234 
00235 
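    /* WMV3IMAGE and VC1IMAGE allocate their frame buffers internally, so
     * release them through the default callback rather than the
     * frame-threading path. */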
00236     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00237         ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
00238     else
00239         avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
00240     av_freep(&pic->f.hwaccel_picture_private);
00241 }
00242 
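/**
 * Allocate a frame buffer through get_buffer() (thread- or hwaccel-aware as
 * needed) and check that the returned strides are usable.
 */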
00246 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
00247 {
00248     int r;
00249 
00250     if (s->avctx->hwaccel) {
00251         assert(!pic->f.hwaccel_picture_private);
00252         if (s->avctx->hwaccel->priv_data_size) {
00253             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
00254             if (!pic->f.hwaccel_picture_private) {
00255                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
00256                 return -1;
00257             }
00258         }
00259     }
00260 
00261     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
00262         r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
00263     else
00264         r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
00265 
00266     if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
00267         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
00268                r, pic->f.age, pic->f.type, pic->f.data[0]);
00269         av_freep(&pic->f.hwaccel_picture_private);
00270         return -1;
00271     }
00272 
00273     if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
00274                         s->uvlinesize != pic->f.linesize[1])) {
00275         av_log(s->avctx, AV_LOG_ERROR,
00276                "get_buffer() failed (stride changed)\n");
00277         free_frame_buffer(s, pic);
00278         return -1;
00279     }
00280 
00281     if (pic->f.linesize[1] != pic->f.linesize[2]) {
00282         av_log(s->avctx, AV_LOG_ERROR,
00283                "get_buffer() failed (uv stride mismatch)\n");
00284         free_frame_buffer(s, pic);
00285         return -1;
00286     }
00287 
00288     return 0;
00289 }
00290 
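/**
 * Allocate a Picture together with its per-macroblock tables.
 * The pixel data is allocated in alloc_frame_buffer() unless shared is set.
 */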
00295 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
00296 {
00297     const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
00298 
00299     
00300 
00301     const int mb_array_size = s->mb_stride * s->mb_height;
00302     const int b8_array_size = s->b8_stride * s->mb_height * 2;
00303     const int b4_array_size = s->b4_stride * s->mb_height * 4;
00304     int i;
00305     int r = -1;
00306 
00307     if (shared) {
00308         assert(pic->f.data[0]);
00309         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
00310         pic->f.type = FF_BUFFER_TYPE_SHARED;
00311     } else {
00312         assert(!pic->f.data[0]);
00313 
00314         if (alloc_frame_buffer(s, pic) < 0)
00315             return -1;
00316 
00317         s->linesize   = pic->f.linesize[0];
00318         s->uvlinesize = pic->f.linesize[1];
00319     }
00320 
00321     if (pic->f.qscale_table == NULL) {
00322         if (s->encoding) {
00323             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
00324                               mb_array_size * sizeof(int16_t), fail)
00325             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
00326                               mb_array_size * sizeof(int16_t), fail)
00327             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
00328                               mb_array_size * sizeof(int8_t ), fail)
00329         }
00330 
00331         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
00332                           mb_array_size * sizeof(uint8_t) + 2, fail)
00333         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
00334                           (big_mb_num + s->mb_stride) * sizeof(uint8_t),
00335                           fail)
00336         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
00337                           (big_mb_num + s->mb_stride) * sizeof(uint32_t),
00338                           fail)
00339         pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
00340         pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
00341         if (s->out_format == FMT_H264) {
00342             for (i = 0; i < 2; i++) {
00343                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00344                                   2 * (b4_array_size + 4) * sizeof(int16_t),
00345                                   fail)
00346                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00347                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00348                                   4 * mb_array_size * sizeof(uint8_t), fail)
00349             }
00350             pic->f.motion_subsample_log2 = 2;
00351         } else if (s->out_format == FMT_H263 || s->encoding ||
00352                    (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
00353             for (i = 0; i < 2; i++) {
00354                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
00355                                   2 * (b8_array_size + 4) * sizeof(int16_t),
00356                                   fail)
00357                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
00358                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
00359                                   4 * mb_array_size * sizeof(uint8_t), fail)
00360             }
00361             pic->f.motion_subsample_log2 = 3;
00362         }
00363         if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00364             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
00365                               64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
00366         }
00367         pic->f.qstride = s->mb_stride;
00368         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
00369                           1 * sizeof(AVPanScan), fail)
00370     }
00371 
00372     
00373 
00374     memmove(s->prev_pict_types + 1, s->prev_pict_types,
00375             PREV_PICT_TYPES_BUFFER_SIZE-1);
00376     s->prev_pict_types[0] =  s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
00377     if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE &&
00378         s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
00379         pic->f.age = INT_MAX; 
00380                               
00381     pic->owner2 = s;
00382 
00383     return 0;
00384 fail: 
00385     if (r >= 0)
00386         free_frame_buffer(s, pic);
00387     return -1;
00388 }
00389 
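/**
 * Deallocate a picture; the frame buffer itself is only released if it is
 * not a shared buffer.
 */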
00393 static void free_picture(MpegEncContext *s, Picture *pic)
00394 {
00395     int i;
00396 
00397     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
00398         free_frame_buffer(s, pic);
00399     }
00400 
00401     av_freep(&pic->mb_var);
00402     av_freep(&pic->mc_mb_var);
00403     av_freep(&pic->mb_mean);
00404     av_freep(&pic->f.mbskip_table);
00405     av_freep(&pic->qscale_table_base);
00406     av_freep(&pic->mb_type_base);
00407     av_freep(&pic->f.dct_coeff);
00408     av_freep(&pic->f.pan_scan);
00409     pic->f.mb_type = NULL;
00410     for (i = 0; i < 2; i++) {
00411         av_freep(&pic->motion_val_base[i]);
00412         av_freep(&pic->f.ref_index[i]);
00413     }
00414 
00415     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
00416         for (i = 0; i < 4; i++) {
00417             pic->f.base[i] =
00418             pic->f.data[i] = NULL;
00419         }
00420         pic->f.type = 0;
00421     }
00422 }
00423 
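/**
 * Allocate the per-thread scratch buffers: edge emulation buffer, motion
 * estimation scratchpad, DCT blocks and (for H.263-style codecs) the AC
 * prediction values.
 */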
00424 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
00425 {
00426     int y_size = s->b8_stride * (2 * s->mb_height + 1);
00427     int c_size = s->mb_stride * (s->mb_height + 1);
00428     int yc_size = y_size + 2 * c_size;
00429     int i;
00430 
00431     
00432     
00433     FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
00434                       (s->width + 64) * 2 * 21 * 2, fail);    
00435 
00436     
00437     
00438     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
00439                       (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
00440     s->me.temp         = s->me.scratchpad;
00441     s->rd_scratchpad   = s->me.scratchpad;
00442     s->b_scratchpad    = s->me.scratchpad;
00443     s->obmc_scratchpad = s->me.scratchpad + 16;
00444     if (s->encoding) {
00445         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
00446                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00447         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
00448                           ME_MAP_SIZE * sizeof(uint32_t), fail)
00449         if (s->avctx->noise_reduction) {
00450             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
00451                               2 * 64 * sizeof(int), fail)
00452         }
00453     }
00454     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
00455     s->block = s->blocks[0];
00456 
00457     for (i = 0; i < 12; i++) {
00458         s->pblocks[i] = &s->block[i];
00459     }
00460 
00461     if (s->out_format == FMT_H263) {
00462         
00463         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
00464                           yc_size * sizeof(int16_t) * 16, fail);
00465         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
00466         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
00467         s->ac_val[2] = s->ac_val[1] + c_size;
00468     }
00469 
00470     return 0;
00471 fail:
00472     return -1; 
00473 }
00474 
00475 static void free_duplicate_context(MpegEncContext *s)
00476 {
00477     if (s == NULL)
00478         return;
00479 
00480     av_freep(&s->edge_emu_buffer);
00481     av_freep(&s->me.scratchpad);
00482     s->me.temp =
00483     s->rd_scratchpad =
00484     s->b_scratchpad =
00485     s->obmc_scratchpad = NULL;
00486 
00487     av_freep(&s->dct_error_sum);
00488     av_freep(&s->me.map);
00489     av_freep(&s->me.score_map);
00490     av_freep(&s->blocks);
00491     av_freep(&s->ac_val_base);
00492     s->block = NULL;
00493 }
00494 
00495 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
00496 {
00497 #define COPY(a) bak->a = src->a
00498     COPY(edge_emu_buffer);
00499     COPY(me.scratchpad);
00500     COPY(me.temp);
00501     COPY(rd_scratchpad);
00502     COPY(b_scratchpad);
00503     COPY(obmc_scratchpad);
00504     COPY(me.map);
00505     COPY(me.score_map);
00506     COPY(blocks);
00507     COPY(block);
00508     COPY(start_mb_y);
00509     COPY(end_mb_y);
00510     COPY(me.map_generation);
00511     COPY(pb);
00512     COPY(dct_error_sum);
00513     COPY(dct_count[0]);
00514     COPY(dct_count[1]);
00515     COPY(ac_val_base);
00516     COPY(ac_val[0]);
00517     COPY(ac_val[1]);
00518     COPY(ac_val[2]);
00519 #undef COPY
00520 }
00521 
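/**
 * Copy src into dst while preserving dst's own scratch buffers and block
 * pointers (backed up before the memcpy and restored afterwards).
 */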
00522 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
00523 {
00524     MpegEncContext bak;
00525     int i;
00526     
00527     
00528     backup_duplicate_context(&bak, dst);
00529     memcpy(dst, src, sizeof(MpegEncContext));
00530     backup_duplicate_context(dst, &bak);
00531     for (i = 0; i < 12; i++) {
00532         dst->pblocks[i] = &dst->block[i];
00533     }
00534     
00535     
00536 }
00537 
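/**
 * Update the destination decoding thread's context from the source thread:
 * picture tables, picture pointers, bitstream buffer and the sequence-level
 * parameters needed to continue decoding.
 */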
00538 int ff_mpeg_update_thread_context(AVCodecContext *dst,
00539                                   const AVCodecContext *src)
00540 {
00541     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00542 
00543     if (dst == src || !s1->context_initialized)
00544         return 0;
00545 
00546     
00547     
00548     if (!s->context_initialized) {
00549         memcpy(s, s1, sizeof(MpegEncContext));
00550 
00551         s->avctx                 = dst;
00552         s->picture_range_start  += MAX_PICTURE_COUNT;
00553         s->picture_range_end    += MAX_PICTURE_COUNT;
00554         s->bitstream_buffer      = NULL;
00555         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00556 
00557         MPV_common_init(s);
00558     }
00559 
00560     s->avctx->coded_height  = s1->avctx->coded_height;
00561     s->avctx->coded_width   = s1->avctx->coded_width;
00562     s->avctx->width         = s1->avctx->width;
00563     s->avctx->height        = s1->avctx->height;
00564 
00565     s->coded_picture_number = s1->coded_picture_number;
00566     s->picture_number       = s1->picture_number;
00567     s->input_picture_number = s1->input_picture_number;
00568 
00569     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00570     memcpy(&s->last_picture, &s1->last_picture,
00571            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
00572 
00573     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00574     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00575     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00576 
00577     memcpy(s->prev_pict_types, s1->prev_pict_types,
00578            PREV_PICT_TYPES_BUFFER_SIZE);
00579 
00580     
00581     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00582     s->workaround_bugs      = s1->workaround_bugs;
00583     s->padding_bug_score    = s1->padding_bug_score;
00584 
00585     
00586     memcpy(&s->time_increment_bits, &s1->time_increment_bits,
00587            (char *) &s1->shape - (char *) &s1->time_increment_bits);
00588 
00589     
00590     s->max_b_frames = s1->max_b_frames;
00591     s->low_delay    = s1->low_delay;
00592     s->dropable     = s1->dropable;
00593 
00594     
00595     s->divx_packed  = s1->divx_packed;
00596 
00597     if (s1->bitstream_buffer) {
00598         if (s1->bitstream_buffer_size +
00599             FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00600             av_fast_malloc(&s->bitstream_buffer,
00601                            &s->allocated_bitstream_buffer_size,
00602                            s1->allocated_bitstream_buffer_size);
00603         s->bitstream_buffer_size = s1->bitstream_buffer_size;
00604         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
00605                s1->bitstream_buffer_size);
00606         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
00607                FF_INPUT_BUFFER_PADDING_SIZE);
00608     }
00609 
00610     
00611     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
00612            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
00613 
00614     if (!s1->first_field) {
00615         s->last_pict_type = s1->pict_type;
00616         if (s1->current_picture_ptr)
00617             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
00618 
00619         if (s1->pict_type != AV_PICTURE_TYPE_B) {
00620             s->last_non_b_pict_type = s1->pict_type;
00621         }
00622     }
00623 
00624     return 0;
00625 }
00626 
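/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 */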
00633 void MPV_common_defaults(MpegEncContext *s)
00634 {
00635     s->y_dc_scale_table      =
00636     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
00637     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
00638     s->progressive_frame     = 1;
00639     s->progressive_sequence  = 1;
00640     s->picture_structure     = PICT_FRAME;
00641 
00642     s->coded_picture_number  = 0;
00643     s->picture_number        = 0;
00644     s->input_picture_number  = 0;
00645 
00646     s->picture_in_gop_number = 0;
00647 
00648     s->f_code                = 1;
00649     s->b_code                = 1;
00650 
00651     s->picture_range_start   = 0;
00652     s->picture_range_end     = MAX_PICTURE_COUNT;
00653 }
00654 
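/**
 * Set the given MpegEncContext to defaults for decoding.
 */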
00660 void MPV_decode_defaults(MpegEncContext *s)
00661 {
00662     MPV_common_defaults(s);
00663 }
00664 
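/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */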
00669 av_cold int MPV_common_init(MpegEncContext *s)
00670 {
00671     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
00672         threads = (s->encoding ||
00673                    (HAVE_THREADS &&
00674                     s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
00675                   s->avctx->thread_count : 1;
00676 
00677     if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00678         s->mb_height = (s->height + 31) / 32 * 2;
00679     else if (s->codec_id != CODEC_ID_H264)
00680         s->mb_height = (s->height + 15) / 16;
00681 
00682     if (s->avctx->pix_fmt == PIX_FMT_NONE) {
00683         av_log(s->avctx, AV_LOG_ERROR,
00684                "decoding to PIX_FMT_NONE is not supported.\n");
00685         return -1;
00686     }
00687 
00688     if ((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
00689         (s->avctx->thread_count > MAX_THREADS ||
00690          (s->avctx->thread_count > s->mb_height && s->mb_height))) {
00691         int max_threads = FFMIN(MAX_THREADS, s->mb_height);
00692         av_log(s->avctx, AV_LOG_WARNING,
00693                "too many threads (%d), reducing to %d\n",
00694                s->avctx->thread_count, max_threads);
00695         threads = max_threads;
00696     }
00697 
00698     if ((s->width || s->height) &&
00699         av_image_check_size(s->width, s->height, 0, s->avctx))
00700         return -1;
00701 
00702     ff_dct_common_init(s);
00703 
00704     s->flags  = s->avctx->flags;
00705     s->flags2 = s->avctx->flags2;
00706 
00707     s->mb_width   = (s->width + 15) / 16;
00708     s->mb_stride  = s->mb_width + 1;
00709     s->b8_stride  = s->mb_width * 2 + 1;
00710     s->b4_stride  = s->mb_width * 4 + 1;
00711     mb_array_size = s->mb_height * s->mb_stride;
00712     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
00713 
00714     
00715     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
00716                                                     &(s->chroma_y_shift) );
00717 
00718     
00719     s->h_edge_pos = s->mb_width * 16;
00720     s->v_edge_pos = s->mb_height * 16;
00721 
00722     s->mb_num = s->mb_width * s->mb_height;
00723 
00724     s->block_wrap[0] =
00725     s->block_wrap[1] =
00726     s->block_wrap[2] =
00727     s->block_wrap[3] = s->b8_stride;
00728     s->block_wrap[4] =
00729     s->block_wrap[5] = s->mb_stride;
00730 
00731     y_size = s->b8_stride * (2 * s->mb_height + 1);
00732     c_size = s->mb_stride * (s->mb_height + 1);
00733     yc_size = y_size + 2 * c_size;
00734 
00735     
00736     s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
00737     s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
00738 
00739     s->avctx->coded_frame = (AVFrame*)&s->current_picture;
00740 
00741     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); 
00742     for (y = 0; y < s->mb_height; y++)
00743         for (x = 0; x < s->mb_width; x++)
00744             s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
00745 
00746     s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; 
00747 
00748     if (s->encoding) {
00749         
00750         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
00751         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00752         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00753         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00754         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00755         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
00756         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
00757         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
00758         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
00759         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
00760         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
00761         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
00762 
00763         if(s->msmpeg4_version){
00764             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
00765         }
00766         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00767 
00768         
00769         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) 
00770 
00771         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
00772 
00773         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix         , 64*32   * sizeof(int), fail)
00774         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix  , 64*32   * sizeof(int), fail)
00775         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix         , 64*32   * sizeof(int), fail)
00776         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16       , 64*32*2 * sizeof(uint16_t), fail)
00777         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00778         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16       , 64*32*2 * sizeof(uint16_t), fail)
00779         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00780         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00781 
00782         if(s->avctx->noise_reduction){
00783             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
00784         }
00785     }
00786 
00787     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00788     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
00789                       s->picture_count * sizeof(Picture), fail);
00790     for (i = 0; i < s->picture_count; i++) {
00791         avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
00792     }
00793 
00794     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
00795 
00796         if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
00797             
00798             for (i = 0; i < 2; i++) {
00799                 int j, k;
00800                 for (j = 0; j < 2; j++) {
00801                     for (k = 0; k < 2; k++) {
00802                         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k],  mv_table_size * 2 * sizeof(int16_t), fail)
00803                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
00804                     }
00805                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
00806                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
00807                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
00808                 }
00809                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
00810             }
00811         }
00812         if (s->out_format == FMT_H263) {
00813             
00814             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00815             s->coded_block = s->coded_block_base + s->b8_stride + 1;
00816 
00817             
00818             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
00819             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
00820         }
00821 
00822         if (s->h263_pred || s->h263_plus || !s->encoding) {
00823             
00824             
00825             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
00826             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00827             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00828             s->dc_val[2] = s->dc_val[1] + c_size;
00829             for (i = 0; i < yc_size; i++)
00830                 s->dc_val_base[i] = 1024;
00831         }
00832 
00833         
00834         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00835         memset(s->mbintra_table, 1, mb_array_size);
00836 
00837         
00838         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
00839         
00840         FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
00841 
00842         s->parse_context.state = -1;
00843         if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
00844             s->avctx->debug_mv) {
00845             s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
00846                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00847             s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
00848                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00849             s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
00850                         2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
00851         }
00852 
00853         s->context_initialized = 1;
00854         s->thread_context[0]   = s;
00855 
00856         if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
00857             for (i = 1; i < threads; i++) {
00858                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
00859                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00860             }
00861 
00862             for (i = 0; i < threads; i++) {
00863                 if (init_duplicate_context(s->thread_context[i], s) < 0)
00864                     goto fail;
00865                 s->thread_context[i]->start_mb_y = (s->mb_height*(i  ) + s->avctx->thread_count / 2) / s->avctx->thread_count;
00866                 s->thread_context[i]->end_mb_y   = (s->mb_height*(i+1) + s->avctx->thread_count / 2) / s->avctx->thread_count;
00867             }
00868         } else {
00869             if (init_duplicate_context(s, s) < 0)
00870                 goto fail;
00871             s->start_mb_y = 0;
00872             s->end_mb_y   = s->mb_height;
00873         }
00874 
00875     return 0;
00876  fail:
00877     MPV_common_end(s);
00878     return -1;
00879 }
00880 
00881 
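/* free everything allocated by MPV_common_init() as well as runtime buffers */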
00882 void MPV_common_end(MpegEncContext *s)
00883 {
00884     int i, j, k;
00885 
00886     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE)) {
00887         for (i = 0; i < s->avctx->thread_count; i++) {
00888             free_duplicate_context(s->thread_context[i]);
00889         }
00890         for (i = 1; i < s->avctx->thread_count; i++) {
00891             av_freep(&s->thread_context[i]);
00892         }
00893     } else free_duplicate_context(s);
00894 
00895     av_freep(&s->parse_context.buffer);
00896     s->parse_context.buffer_size = 0;
00897 
00898     av_freep(&s->mb_type);
00899     av_freep(&s->p_mv_table_base);
00900     av_freep(&s->b_forw_mv_table_base);
00901     av_freep(&s->b_back_mv_table_base);
00902     av_freep(&s->b_bidir_forw_mv_table_base);
00903     av_freep(&s->b_bidir_back_mv_table_base);
00904     av_freep(&s->b_direct_mv_table_base);
00905     s->p_mv_table            = NULL;
00906     s->b_forw_mv_table       = NULL;
00907     s->b_back_mv_table       = NULL;
00908     s->b_bidir_forw_mv_table = NULL;
00909     s->b_bidir_back_mv_table = NULL;
00910     s->b_direct_mv_table     = NULL;
00911     for (i = 0; i < 2; i++) {
00912         for (j = 0; j < 2; j++) {
00913             for (k = 0; k < 2; k++) {
00914                 av_freep(&s->b_field_mv_table_base[i][j][k]);
00915                 s->b_field_mv_table[i][j][k] = NULL;
00916             }
00917             av_freep(&s->b_field_select_table[i][j]);
00918             av_freep(&s->p_field_mv_table_base[i][j]);
00919             s->p_field_mv_table[i][j] = NULL;
00920         }
00921         av_freep(&s->p_field_select_table[i]);
00922     }
00923 
00924     av_freep(&s->dc_val_base);
00925     av_freep(&s->coded_block_base);
00926     av_freep(&s->mbintra_table);
00927     av_freep(&s->cbp_table);
00928     av_freep(&s->pred_dir_table);
00929 
00930     av_freep(&s->mbskip_table);
00931     av_freep(&s->prev_pict_types);
00932     av_freep(&s->bitstream_buffer);
00933     s->allocated_bitstream_buffer_size = 0;
00934 
00935     av_freep(&s->avctx->stats_out);
00936     av_freep(&s->ac_stats);
00937     av_freep(&s->error_status_table);
00938     av_freep(&s->mb_index2xy);
00939     av_freep(&s->lambda_table);
00940     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
00941     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
00942     s->q_chroma_intra_matrix=   NULL;
00943     s->q_chroma_intra_matrix16= NULL;
00944     av_freep(&s->q_intra_matrix);
00945     av_freep(&s->q_inter_matrix);
00946     av_freep(&s->q_intra_matrix16);
00947     av_freep(&s->q_inter_matrix16);
00948     av_freep(&s->input_picture);
00949     av_freep(&s->reordered_input_picture);
00950     av_freep(&s->dct_offset);
00951 
00952     if (s->picture && !s->avctx->internal->is_copy) {
00953         for (i = 0; i < s->picture_count; i++) {
00954             free_picture(s, &s->picture[i]);
00955         }
00956     }
00957     av_freep(&s->picture);
00958     s->context_initialized      = 0;
00959     s->last_picture_ptr         =
00960     s->next_picture_ptr         =
00961     s->current_picture_ptr      = NULL;
00962     s->linesize = s->uvlinesize = 0;
00963 
00964     for (i = 0; i < 3; i++)
00965         av_freep(&s->visualization_buffer[i]);
00966 
00967     if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
00968         avcodec_default_free_buffers(s->avctx);
00969 }
00970 
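/**
 * Build the run/level helper tables (max_level, max_run and index_run)
 * for the given RLTable, optionally into statically allocated storage.
 */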
00971 void init_rl(RLTable *rl,
00972              uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
00973 {
00974     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
00975     uint8_t index_run[MAX_RUN + 1];
00976     int last, run, level, start, end, i;
00977 
00978     
00979     if (static_store && rl->max_level[0])
00980         return;
00981 
00982     
00983     for (last = 0; last < 2; last++) {
00984         if (last == 0) {
00985             start = 0;
00986             end = rl->last;
00987         } else {
00988             start = rl->last;
00989             end = rl->n;
00990         }
00991 
00992         memset(max_level, 0, MAX_RUN + 1);
00993         memset(max_run, 0, MAX_LEVEL + 1);
00994         memset(index_run, rl->n, MAX_RUN + 1);
00995         for (i = start; i < end; i++) {
00996             run   = rl->table_run[i];
00997             level = rl->table_level[i];
00998             if (index_run[run] == rl->n)
00999                 index_run[run] = i;
01000             if (level > max_level[run])
01001                 max_level[run] = level;
01002             if (run > max_run[level])
01003                 max_run[level] = run;
01004         }
01005         if (static_store)
01006             rl->max_level[last] = static_store[last];
01007         else
01008             rl->max_level[last] = av_malloc(MAX_RUN + 1);
01009         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
01010         if (static_store)
01011             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
01012         else
01013             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
01014         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
01015         if (static_store)
01016             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
01017         else
01018             rl->index_run[last] = av_malloc(MAX_RUN + 1);
01019         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
01020     }
01021 }
01022 
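/**
 * Expand the RLTable's VLC into per-qscale (len, run, level) entries,
 * folding the dequantization (qmul/qadd) into the stored levels.
 */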
01023 void init_vlc_rl(RLTable *rl)
01024 {
01025     int i, q;
01026 
01027     for (q = 0; q < 32; q++) {
01028         int qmul = q * 2;
01029         int qadd = (q - 1) | 1;
01030 
01031         if (q == 0) {
01032             qmul = 1;
01033             qadd = 0;
01034         }
01035         for (i = 0; i < rl->vlc.table_size; i++) {
01036             int code = rl->vlc.table[i][0];
01037             int len  = rl->vlc.table[i][1];
01038             int level, run;
01039 
01040             if (len == 0) { 
01041                 run   = 66;
01042                 level = MAX_LEVEL;
01043             } else if (len < 0) { 
01044                 run   = 0;
01045                 level = code;
01046             } else {
01047                 if (code == rl->n) { 
01048                     run   = 66;
01049                     level =  0;
01050                 } else {
01051                     run   = rl->table_run[code] + 1;
01052                     level = rl->table_level[code] * qmul + qadd;
01053                     if (code >= rl->last) run += 192;
01054                 }
01055             }
01056             rl->rl_vlc[q][i].len   = len;
01057             rl->rl_vlc[q][i].level = level;
01058             rl->rl_vlc[q][i].run   = run;
01059         }
01060     }
01061 }
01062 
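/* release the buffers of unreferenced pictures owned by this context;
 * the current picture is kept unless remove_current is set */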
01063 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
01064 {
01065     int i;
01066 
01067     
01068     for (i = 0; i < s->picture_count; i++) {
01069         if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
01070             (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
01071             (remove_current || &s->picture[i] !=  s->current_picture_ptr)
01072             ) {
01073             free_frame_buffer(s, &s->picture[i]);
01074         }
01075     }
01076 }
01077 
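/* return the index of an unused slot in s->picture within this context's
 * picture range; aborts if none is found */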
01078 int ff_find_unused_picture(MpegEncContext *s, int shared)
01079 {
01080     int i;
01081 
01082     if (shared) {
01083         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01084             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
01085                 return i;
01086         }
01087     } else {
01088         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01089             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
01090                 return i; 
01091         }
01092         for (i = s->picture_range_start; i < s->picture_range_end; i++) {
01093             if (s->picture[i].f.data[0] == NULL)
01094                 return i;
01095         }
01096     }
01097 
01098     av_log(s->avctx, AV_LOG_FATAL,
01099            "Internal error, picture buffer overflow\n");
01111     abort();
01112     return -1;
01113 }
01114 
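/* rescale the accumulated DCT error statistics when the sample count gets
 * large and recompute the per-coefficient noise reduction offsets */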
01115 static void update_noise_reduction(MpegEncContext *s){
01116     int intra, i;
01117 
01118     for(intra=0; intra<2; intra++){
01119         if(s->dct_count[intra] > (1<<16)){
01120             for(i=0; i<64; i++){
01121                 s->dct_error_sum[intra][i] >>=1;
01122             }
01123             s->dct_count[intra] >>= 1;
01124         }
01125 
01126         for(i=0; i<64; i++){
01127             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
01128         }
01129     }
01130 }
01131 
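/**
 * Generic function for encode/decode called after the coded frame header
 * has been decoded and before a frame is decoded.
 */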
01135 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
01136 {
01137     int i;
01138     Picture *pic;
01139     s->mb_skipped = 0;
01140 
01141     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
01142 
01143     
01144     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
01145       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
01146           if (s->last_picture_ptr->owner2 == s)
01147               free_frame_buffer(s, s->last_picture_ptr);
01148 
01149         
01150         
01151         if(!s->encoding){
01152             for(i=0; i<s->picture_count; i++){
01153                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
01154                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
01155                         av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
01156                     free_frame_buffer(s, &s->picture[i]);
01157                 }
01158             }
01159         }
01160       }
01161     }
01162 
01163     if(!s->encoding){
01164         ff_release_unused_pictures(s, 1);
01165 
01166         if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
01167             pic= s->current_picture_ptr; 
01168         else{
01169             i= ff_find_unused_picture(s, 0);
01170             pic= &s->picture[i];
01171         }
01172 
01173         pic->f.reference = 0;
01174         if (!s->dropable){
01175             if (s->codec_id == CODEC_ID_H264)
01176                 pic->f.reference = s->picture_structure;
01177             else if (s->pict_type != AV_PICTURE_TYPE_B)
01178                 pic->f.reference = 3;
01179         }
01180 
01181         pic->f.coded_picture_number = s->coded_picture_number++;
01182 
01183         if(ff_alloc_picture(s, pic, 0) < 0)
01184             return -1;
01185 
01186         s->current_picture_ptr= pic;
01187         
01188         s->current_picture_ptr->f.top_field_first = s->top_field_first;
01189         if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
01190             if(s->picture_structure != PICT_FRAME)
01191                 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
01192         }
01193         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
01194         s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
01195     }
01196 
01197     s->current_picture_ptr->f.pict_type = s->pict_type;
01198 
01199   
01200     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
01201 
01202     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
01203 
01204     if (s->pict_type != AV_PICTURE_TYPE_B) {
01205         s->last_picture_ptr= s->next_picture_ptr;
01206         if(!s->dropable)
01207             s->next_picture_ptr= s->current_picture_ptr;
01208     }
01209 
01210 
01211 
01212 
01213 
01214 
01215     if(s->codec_id != CODEC_ID_H264){
01216         if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
01217            (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
01218             if (s->pict_type != AV_PICTURE_TYPE_I)
01219                 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
01220             else if (s->picture_structure != PICT_FRAME)
01221                 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
01222 
01223             
01224             i= ff_find_unused_picture(s, 0);
01225             s->last_picture_ptr= &s->picture[i];
01226             s->last_picture_ptr->f.key_frame = 0;
01227             if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
01228                 return -1;
01229 
01230             if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
01231                 for(i=0; i<avctx->height; i++)
01232                     memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
01233             }
01234 
01235             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
01236             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
01237         }
01238         if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
01239             
01240             i= ff_find_unused_picture(s, 0);
01241             s->next_picture_ptr= &s->picture[i];
01242             s->next_picture_ptr->f.key_frame = 0;
01243             if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
01244                 return -1;
01245             ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
01246             ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
01247         }
01248     }
01249 
01250     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
01251     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
01252 
01253     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
01254 
01255     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
01256         int i;
01257         for(i=0; i<4; i++){
01258             if(s->picture_structure == PICT_BOTTOM_FIELD){
01259                  s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
01260             }
01261             s->current_picture.f.linesize[i] *= 2;
01262             s->last_picture.f.linesize[i]    *= 2;
01263             s->next_picture.f.linesize[i]    *= 2;
01264         }
01265     }
01266 
01267     s->error_recognition= avctx->error_recognition;
01268 
01269     
01270 
01271     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
01272         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
01273         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
01274     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
01275         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
01276         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
01277     }else{
01278         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
01279         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
01280     }
01281 
01282     if(s->dct_error_sum){
01283         assert(s->avctx->noise_reduction && s->encoding);
01284 
01285         update_noise_reduction(s);
01286     }
01287 
01288     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
01289         return ff_xvmc_field_start(s, avctx);
01290 
01291     return 0;
01292 }
01293 
01294 
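/* generic function for encode/decode called just after a frame has been
 * coded/decoded */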
01295 void MPV_frame_end(MpegEncContext *s)
01296 {
01297     int i;
01298     
01299     
01300     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01301         ff_xvmc_field_end(s);
01302    }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
01303        && !s->avctx->hwaccel
01304        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
01305        && s->unrestricted_mv
01306        && s->current_picture.f.reference
01307        && !s->intra_only
01308        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
01309             int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01310             int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01311             s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
01312                               s->h_edge_pos             , s->v_edge_pos,
01313                               EDGE_WIDTH        , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
01314             s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
01315                               s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
01316                               EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
01317             s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
01318                               s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
01319                               EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
01320     }
01321 
01322     emms_c();
01323 
01324     s->last_pict_type    = s->pict_type;
01325     s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
01326     if(s->pict_type!=AV_PICTURE_TYPE_B){
01327         s->last_non_b_pict_type= s->pict_type;
01328     }
01329 #if 0
01330         
01331     for(i=0; i<MAX_PICTURE_COUNT; i++){
01332         if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
01333             s->picture[i]= s->current_picture;
01334             break;
01335         }
01336     }
01337     assert(i<MAX_PICTURE_COUNT);
01338 #endif
01339 
01340     if(s->encoding){
01341         
01342         for(i=0; i<s->picture_count; i++){
01343             if (s->picture[i].f.data[0] && !s->picture[i].f.reference ) {
01344                 free_frame_buffer(s, &s->picture[i]);
01345             }
01346         }
01347     }
01348     
01349 #if 0
01350     memset(&s->last_picture, 0, sizeof(Picture));
01351     memset(&s->next_picture, 0, sizeof(Picture));
01352     memset(&s->current_picture, 0, sizeof(Picture));
01353 #endif
01354     s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
01355 
01356     if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
01357         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
01358     }
01359 }
01360 
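/**
 * Draw a line from (sx, sy) to (ex, ey) into the given plane,
 * adding 'color' with sub-pixel weighting.
 */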
01368 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01369     int x, y, fr, f;
01370 
01371     sx= av_clip(sx, 0, w-1);
01372     sy= av_clip(sy, 0, h-1);
01373     ex= av_clip(ex, 0, w-1);
01374     ey= av_clip(ey, 0, h-1);
01375 
01376     buf[sy*stride + sx]+= color;
01377 
01378     if(FFABS(ex - sx) > FFABS(ey - sy)){
01379         if(sx > ex){
01380             FFSWAP(int, sx, ex);
01381             FFSWAP(int, sy, ey);
01382         }
01383         buf+= sx + sy*stride;
01384         ex-= sx;
01385         f= ((ey-sy)<<16)/ex;
01386         for(x= 0; x <= ex; x++){
01387             y = (x*f)>>16;
01388             fr= (x*f)&0xFFFF;
01389             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
01390             buf[(y+1)*stride + x]+= (color*         fr )>>16;
01391         }
01392     }else{
01393         if(sy > ey){
01394             FFSWAP(int, sx, ex);
01395             FFSWAP(int, sy, ey);
01396         }
01397         buf+= sx + sy*stride;
01398         ey-= sy;
01399         if(ey) f= ((ex-sx)<<16)/ey;
01400         else   f= 0;
01401         for(y= 0; y <= ey; y++){
01402             x = (y*f)>>16;
01403             fr= (y*f)&0xFFFF;
01404             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
01405             buf[y*stride + x+1]+= (color*         fr )>>16;
01406         }
01407     }
01408 }
01409 
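/**
 * Draw an arrow from (ex, ey) to (sx, sy), i.e. with the head at (sx, sy).
 */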
01417 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01418     int dx,dy;
01419 
01420     sx= av_clip(sx, -100, w+100);
01421     sy= av_clip(sy, -100, h+100);
01422     ex= av_clip(ex, -100, w+100);
01423     ey= av_clip(ey, -100, h+100);
01424 
01425     dx= ex - sx;
01426     dy= ey - sy;
01427 
01428     if(dx*dx + dy*dy > 3*3){
01429         int rx=  dx + dy;
01430         int ry= -dx + dy;
01431         int length= ff_sqrt((rx*rx + ry*ry)<<8);
01432 
01433         
01434         rx= ROUNDED_DIV(rx*3<<4, length);
01435         ry= ROUNDED_DIV(ry*3<<4, length);
01436 
01437         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01438         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01439     }
01440     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01441 }
01442 
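/**
 * Print debugging info for the given picture.
 */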
01446 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
01447 
01448     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
01449 
01450     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
01451         int x,y;
01452 
01453         av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
01454                av_get_picture_type_char(pict->pict_type));
01455         for(y=0; y<s->mb_height; y++){
01456             for(x=0; x<s->mb_width; x++){
01457                 if(s->avctx->debug&FF_DEBUG_SKIP){
01458                     int count= s->mbskip_table[x + y*s->mb_stride];
01459                     if(count>9) count=9;
01460                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01461                 }
01462                 if(s->avctx->debug&FF_DEBUG_QP){
01463                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
01464                 }
01465                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
01466                     int mb_type= pict->mb_type[x + y*s->mb_stride];
01467                     
01468                     if(IS_PCM(mb_type))
01469                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01470                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01471                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01472                     else if(IS_INTRA4x4(mb_type))
01473                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01474                     else if(IS_INTRA16x16(mb_type))
01475                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01476                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01477                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01478                     else if(IS_DIRECT(mb_type))
01479                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01480                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
01481                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01482                     else if(IS_GMC(mb_type))
01483                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01484                     else if(IS_SKIP(mb_type))
01485                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01486                     else if(!USES_LIST(mb_type, 1))
01487                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01488                     else if(!USES_LIST(mb_type, 0))
01489                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01490                     else{
01491                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01492                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01493                     }
01494 
01495                     /* macroblock partition shape */
01496                     if(IS_8X8(mb_type))
01497                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01498                     else if(IS_16X8(mb_type))
01499                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01500                     else if(IS_8X16(mb_type))
01501                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01502                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
01503                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01504                     else
01505                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01506 
01507 
01508                     if(IS_INTERLACED(mb_type))
01509                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01510                     else
01511                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01512                 }
01513 
01514             }
01515             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01516         }
01517     }
01518 
01519     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
01520         s->avctx->debug_mv) {
01521         const int shift= 1 + s->quarter_sample;
01522         int mb_y;
01523         uint8_t *ptr;
01524         int i;
01525         int h_chroma_shift, v_chroma_shift, block_height;
01526         const int width = s->avctx->width;
01527         const int height= s->avctx->height;
01528         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
01529         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01530         s->low_delay=0; /* needed to see the vectors without trashing the buffers */
01531 
01532         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
01533         for(i=0; i<3; i++){
01534             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
01535             pict->data[i]= s->visualization_buffer[i];
01536         }
01537         pict->type= FF_BUFFER_TYPE_COPY;
01538         pict->opaque= NULL;
01539         ptr= pict->data[0];
01540         block_height = 16>>v_chroma_shift;
01541 
01542         for(mb_y=0; mb_y<s->mb_height; mb_y++){
01543             int mb_x;
01544             for(mb_x=0; mb_x<s->mb_width; mb_x++){
01545                 const int mb_index= mb_x + mb_y*s->mb_stride;
01546                 if (s->avctx->debug_mv && pict->motion_val) {
01547                   int type;
01548                   for(type=0; type<3; type++){
01549                     int direction = 0;
01550                     switch (type) {
01551                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
01552                                 continue;
01553                               direction = 0;
01554                               break;
01555                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01556                                 continue;
01557                               direction = 0;
01558                               break;
01559                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01560                                 continue;
01561                               direction = 1;
01562                               break;
01563                     }
01564                     if(!USES_LIST(pict->mb_type[mb_index], direction))
01565                         continue;
01566 
01567                     if(IS_8X8(pict->mb_type[mb_index])){
01568                       int i;
01569                       for(i=0; i<4; i++){
01570                         int sx= mb_x*16 + 4 + 8*(i&1);
01571                         int sy= mb_y*16 + 4 + 8*(i>>1);
01572                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01573                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01574                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01575                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01576                       }
01577                     }else if(IS_16X8(pict->mb_type[mb_index])){
01578                       int i;
01579                       for(i=0; i<2; i++){
01580                         int sx=mb_x*16 + 8;
01581                         int sy=mb_y*16 + 4 + 8*i;
01582                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
01583                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01584                         int my=(pict->motion_val[direction][xy][1]>>shift);
01585 
01586                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01587                             my*=2;
01588 
01589                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01590                       }
01591                     }else if(IS_8X16(pict->mb_type[mb_index])){
01592                       int i;
01593                       for(i=0; i<2; i++){
01594                         int sx=mb_x*16 + 4 + 8*i;
01595                         int sy=mb_y*16 + 8;
01596                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
01597                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01598                         int my=(pict->motion_val[direction][xy][1]>>shift);
01599 
01600                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01601                             my*=2;
01602 
01603                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01604                       }
01605                     }else{
01606                       int sx= mb_x*16 + 8;
01607                       int sy= mb_y*16 + 8;
01608                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
01609                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01610                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01611                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01612                     }
01613                   }
01614                 }
01615                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
01616                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
01617                     int y;
01618                     for(y=0; y<block_height; y++){
01619                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
01620                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
01621                     }
01622                 }
01623                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
01624                     int mb_type= pict->mb_type[mb_index];
01625                     uint64_t u,v;
01626                     int y;
01627 #define COLOR(theta, r)\
01628 u= (int)(128 + r*cos(theta*3.141592/180));\
01629 v= (int)(128 + r*sin(theta*3.141592/180));
01630 
01631 
01632                     u=v=128;
01633                     if(IS_PCM(mb_type)){
01634                         COLOR(120,48)
01635                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
01636                         COLOR(30,48)
01637                     }else if(IS_INTRA4x4(mb_type)){
01638                         COLOR(90,48)
01639                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
01640 
01641                     }else if(IS_DIRECT(mb_type)){
01642                         COLOR(150,48)
01643                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
01644                         COLOR(170,48)
01645                     }else if(IS_GMC(mb_type)){
01646                         COLOR(190,48)
01647                     }else if(IS_SKIP(mb_type)){
01648 
01649                     }else if(!USES_LIST(mb_type, 1)){
01650                         COLOR(240,48)
01651                     }else if(!USES_LIST(mb_type, 0)){
01652                         COLOR(0,48)
01653                     }else{
01654                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01655                         COLOR(300,48)
01656                     }
01657 
01658                     u*= 0x0101010101010101ULL;
01659                     v*= 0x0101010101010101ULL;
01660                     for(y=0; y<block_height; y++){
01661                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
01662                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
01663                     }
01664 
01665                     
01666                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
01667                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01668                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01669                     }
01670                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
01671                         for(y=0; y<16; y++)
01672                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
01673                     }
01674                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
01675                         int dm= 1 << (mv_sample_log2-2);
01676                         for(i=0; i<4; i++){
01677                             int sx= mb_x*16 + 8*(i&1);
01678                             int sy= mb_y*16 + 8*(i>>1);
01679                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01680                             
01681                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
01682                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
01683                                 for(y=0; y<8; y++)
01684                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
01685                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
01686                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
01687                         }
01688                     }
01689 
01690                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
01691                         
01692                     }
01693                 }
01694                 s->mbskip_table[mb_index]=0;
01695             }
01696         }
01697     }
01698 }
01699 
01700 static inline int hpel_motion_lowres(MpegEncContext *s,
01701                                   uint8_t *dest, uint8_t *src,
01702                                   int field_based, int field_select,
01703                                   int src_x, int src_y,
01704                                   int width, int height, int stride,
01705                                   int h_edge_pos, int v_edge_pos,
01706                                   int w, int h, h264_chroma_mc_func *pix_op,
01707                                   int motion_x, int motion_y)
01708 {
01709     const int lowres= s->avctx->lowres;
01710     const int op_index= FFMIN(lowres, 2);
01711     const int s_mask= (2<<lowres)-1;
01712     int emu=0;
01713     int sx, sy;
01714 
01715     if(s->quarter_sample){
01716         motion_x/=2;
01717         motion_y/=2;
01718     }
01719 
01720     sx= motion_x & s_mask;
01721     sy= motion_y & s_mask;
01722     src_x += motion_x >> (lowres+1);
01723     src_y += motion_y >> (lowres+1);
01724 
01725     src += src_y * stride + src_x;
01726 
01727     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
01728        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01729         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
01730                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01731         src= s->edge_emu_buffer;
01732         emu=1;
01733     }
01734 
01735     sx= (sx << 2) >> lowres;
01736     sy= (sy << 2) >> lowres;
01737     if(field_select)
01738         src += s->linesize;
01739     pix_op[op_index](dest, src, stride, h, sx, sy);
01740     return emu;
01741 }
01742 
01743 
01744 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
01745                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01746                                int field_based, int bottom_field, int field_select,
01747                                uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
01748                                int motion_x, int motion_y, int h, int mb_y)
01749 {
01750     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01751     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
01752     const int lowres= s->avctx->lowres;
01753     const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
01754     const int block_s= 8>>lowres;
01755     const int s_mask= (2<<lowres)-1;
01756     const int h_edge_pos = s->h_edge_pos >> lowres;
01757     const int v_edge_pos = s->v_edge_pos >> lowres;
01758     linesize   = s->current_picture.f.linesize[0] << field_based;
01759     uvlinesize = s->current_picture.f.linesize[1] << field_based;
01760 
01761     if(s->quarter_sample){ 
01762         motion_x/=2;
01763         motion_y/=2;
01764     }
01765 
01766     if(field_based){
01767         motion_y += (bottom_field - field_select)*((1<<lowres)-1);
01768     }
01769 
01770     sx= motion_x & s_mask;
01771     sy= motion_y & s_mask;
01772     src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
01773     src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
01774 
01775     if (s->out_format == FMT_H263) {
01776         uvsx = ((motion_x>>1) & s_mask) | (sx&1);
01777         uvsy = ((motion_y>>1) & s_mask) | (sy&1);
01778         uvsrc_x = src_x>>1;
01779         uvsrc_y = src_y>>1;
01780     }else if(s->out_format == FMT_H261){
01781         mx = motion_x / 4;
01782         my = motion_y / 4;
01783         uvsx = (2*mx) & s_mask;
01784         uvsy = (2*my) & s_mask;
01785         uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
01786         uvsrc_y =    mb_y*block_s               + (my >> lowres);
01787     } else {
01788         if(s->chroma_y_shift){
01789             mx = motion_x / 2;
01790             my = motion_y / 2;
01791             uvsx = mx & s_mask;
01792             uvsy = my & s_mask;
01793             uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
01794             uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
01795         } else {
01796             if(s->chroma_x_shift){
01797             /* 4:2:2 chroma */
01798                 mx = motion_x / 2;
01799                 uvsx = mx & s_mask;
01800                 uvsy = motion_y & s_mask;
01801                 uvsrc_y = src_y;
01802                 uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
01803             } else {
01804             /* 4:4:4 chroma */
01805                 uvsx = motion_x & s_mask;
01806                 uvsy = motion_y & s_mask;
01807                 uvsrc_x = src_x;
01808                 uvsrc_y = src_y;
01809             }
01810         }
01811     }
01812 
01813     ptr_y  = ref_picture[0] + src_y * linesize + src_x;
01814     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
01815     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
01816 
01817     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
01818        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
01819             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
01820                              src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
01821             ptr_y = s->edge_emu_buffer;
01822             if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01823                 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
01824                 s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
01825                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01826                 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
01827                                  uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
01828                 ptr_cb= uvbuf;
01829                 ptr_cr= uvbuf+16;
01830             }
01831     }
01832 
01833     if(bottom_field){ /* write to the bottom-field lines of the destination */
01834         dest_y += s->linesize;
01835         dest_cb+= s->uvlinesize;
01836         dest_cr+= s->uvlinesize;
01837     }
01838 
01839     if(field_select){
01840         ptr_y += s->linesize;
01841         ptr_cb+= s->uvlinesize;
01842         ptr_cr+= s->uvlinesize;
01843     }
01844 
01845     sx= (sx << 2) >> lowres;
01846     sy= (sy << 2) >> lowres;
01847     pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
01848 
01849     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01850         uvsx= (uvsx << 2) >> lowres;
01851         uvsy= (uvsy << 2) >> lowres;
01852         if(h >> s->chroma_y_shift){
01853             pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01854             pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
01855         }
01856     }
01857     
01858 }
01859 
01860 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01861                                      uint8_t *dest_cb, uint8_t *dest_cr,
01862                                      uint8_t **ref_picture,
01863                                      h264_chroma_mc_func *pix_op,
01864                                      int mx, int my){
01865     const int lowres= s->avctx->lowres;
01866     const int op_index= FFMIN(lowres, 2);
01867     const int block_s= 8>>lowres;
01868     const int s_mask= (2<<lowres)-1;
01869     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01870     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01871     int emu=0, src_x, src_y, offset, sx, sy;
01872     uint8_t *ptr;
01873 
01874     if(s->quarter_sample){
01875         mx/=2;
01876         my/=2;
01877     }
01878 
01879     /* in case of 8x8 motion, a single chroma motion vector is constructed
01880        with a special rounding */
01881     mx= ff_h263_round_chroma(mx);
01882     my= ff_h263_round_chroma(my);
01883 
01884     sx= mx & s_mask;
01885     sy= my & s_mask;
01886     src_x = s->mb_x*block_s + (mx >> (lowres+1));
01887     src_y = s->mb_y*block_s + (my >> (lowres+1));
01888 
01889     offset = src_y * s->uvlinesize + src_x;
01890     ptr = ref_picture[1] + offset;
01891     if(s->flags&CODEC_FLAG_EMU_EDGE){
01892         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01893            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01894             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01895             ptr= s->edge_emu_buffer;
01896             emu=1;
01897         }
01898     }
01899     sx= (sx << 2) >> lowres;
01900     sy= (sy << 2) >> lowres;
01901     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01902 
01903     ptr = ref_picture[2] + offset;
01904     if(emu){
01905         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01906         ptr= s->edge_emu_buffer;
01907     }
01908     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01909 }
01910 
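01911 /**
01912  * motion compensation of a single macroblock, lowres variant
01913  * @param s context
01914  * @param dest_y luma destination pointer
01915  * @param dest_cb chroma cb/u destination pointer
01916  * @param dest_cr chroma cr/v destination pointer
01917  * @param dir direction (0->forward, 1->backward)
01918  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
01919  * @param pix_op halfpel motion compensation function (put or average)
01920  * the motion vectors are taken from s->mv and the MV type from s->mv_type
01921  */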
01922 static inline void MPV_motion_lowres(MpegEncContext *s,
01923                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
01924                               int dir, uint8_t **ref_picture,
01925                               h264_chroma_mc_func *pix_op)
01926 {
01927     int mx, my;
01928     int mb_x, mb_y, i;
01929     const int lowres= s->avctx->lowres;
01930     const int block_s= 8>>lowres;
01931 
01932     mb_x = s->mb_x;
01933     mb_y = s->mb_y;
01934 
01935     switch(s->mv_type) {
01936     case MV_TYPE_16X16:
01937         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01938                     0, 0, 0,
01939                     ref_picture, pix_op,
01940                     s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
01941         break;
01942     case MV_TYPE_8X8:
01943         mx = 0;
01944         my = 0;
01945             for(i=0;i<4;i++) {
01946                 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
01947                             ref_picture[0], 0, 0,
01948                             (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
01949                             s->width, s->height, s->linesize,
01950                             s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
01951                             block_s, block_s, pix_op,
01952                             s->mv[dir][i][0], s->mv[dir][i][1]);
01953 
01954                 mx += s->mv[dir][i][0];
01955                 my += s->mv[dir][i][1];
01956             }
01957 
01958         if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
01959             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
01960         break;
01961     case MV_TYPE_FIELD:
01962         if (s->picture_structure == PICT_FRAME) {
01963             /* top field */
01964             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01965                         1, 0, s->field_select[dir][0],
01966                         ref_picture, pix_op,
01967                         s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
01968             /* bottom field */
01969             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01970                         1, 1, s->field_select[dir][1],
01971                         ref_picture, pix_op,
01972                         s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
01973         } else {
01974             if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
01975                 ref_picture = s->current_picture_ptr->f.data;
01976             }
01977 
01978             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01979                         0, 0, s->field_select[dir][0],
01980                         ref_picture, pix_op,
01981                         s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
01982         }
01983         break;
01984     case MV_TYPE_16X8:
01985         for(i=0; i<2; i++){
01986             uint8_t ** ref2picture;
01987 
01988             if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
01989                 ref2picture= ref_picture;
01990             }else{
01991                 ref2picture = s->current_picture_ptr->f.data;
01992             }
01993 
01994             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
01995                         0, 0, s->field_select[dir][i],
01996                         ref2picture, pix_op,
01997                         s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
01998 
01999             dest_y += 2*block_s*s->linesize;
02000             dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
02001             dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
02002         }
02003         break;
02004     case MV_TYPE_DMV:
02005         if(s->picture_structure == PICT_FRAME){
02006             for(i=0; i<2; i++){
02007                 int j;
02008                 for(j=0; j<2; j++){
02009                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02010                                 1, j, j^i,
02011                                 ref_picture, pix_op,
02012                                 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
02013                 }
02014                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02015             }
02016         }else{
02017             for(i=0; i<2; i++){
02018                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
02019                             0, 0, s->picture_structure != i+1,
02020                             ref_picture, pix_op,
02021                             s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
02022 
02023                 /* after the first field is put, the second one is averaged in */
02024                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
02025 
02026                 /* the opposite parity is in the same frame if this is the second field */
02027                 if(!s->first_field){
02028                     ref_picture = s->current_picture_ptr->f.data;
02029                 }
02030             }
02031         }
02032     break;
02033     default: assert(0);
02034     }
02035 }
02036 
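02037 /**
02038  * find the lowest MB row referenced in the MVs
02039  */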
02040 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
02041 {
02042     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
02043     int my, off, i, mvs;
02044 
02045     if (s->picture_structure != PICT_FRAME) goto unhandled;
02046 
02047     switch (s->mv_type) {
02048         case MV_TYPE_16X16:
02049             mvs = 1;
02050             break;
02051         case MV_TYPE_16X8:
02052             mvs = 2;
02053             break;
02054         case MV_TYPE_8X8:
02055             mvs = 4;
02056             break;
02057         default:
02058             goto unhandled;
02059     }
02060 
02061     for (i = 0; i < mvs; i++) {
02062         my = s->mv[dir][i][1]<<qpel_shift;
02063         my_max = FFMAX(my_max, my);
02064         my_min = FFMIN(my_min, my);
02065     }
02066 
02067     off = (FFMAX(-my_min, my_max) + 63) >> 6;
02068 
02069     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
02070 unhandled:
02071     return s->mb_height-1;
02072 }
02073 
02074 
02075 static inline void put_dct(MpegEncContext *s,
02076                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02077 {
02078     s->dct_unquantize_intra(s, block, i, qscale);
02079     s->dsp.idct_put (dest, line_size, block);
02080 }
02081 
02082 
02083 static inline void add_dct(MpegEncContext *s,
02084                            DCTELEM *block, int i, uint8_t *dest, int line_size)
02085 {
02086     if (s->block_last_index[i] >= 0) {
02087         s->dsp.idct_add (dest, line_size, block);
02088     }
02089 }
02090 
02091 static inline void add_dequant_dct(MpegEncContext *s,
02092                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
02093 {
02094     if (s->block_last_index[i] >= 0) {
02095         s->dct_unquantize_inter(s, block, i, qscale);
02096 
02097         s->dsp.idct_add (dest, line_size, block);
02098     }
02099 }
02100 
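02101 /**
02102  * Clean dc, ac, coded_block for the current non-intra MB.
02103  */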
02104 void ff_clean_intra_table_entries(MpegEncContext *s)
02105 {
02106     int wrap = s->b8_stride;
02107     int xy = s->block_index[0];
02108 
02109     s->dc_val[0][xy           ] =
02110     s->dc_val[0][xy + 1       ] =
02111     s->dc_val[0][xy     + wrap] =
02112     s->dc_val[0][xy + 1 + wrap] = 1024;
02113     /* ac prediction */
02114     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
02115     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
02116     if (s->msmpeg4_version>=3) {
02117         s->coded_block[xy           ] =
02118         s->coded_block[xy + 1       ] =
02119         s->coded_block[xy     + wrap] =
02120         s->coded_block[xy + 1 + wrap] = 0;
02121     }
02122     /* chroma */
02123     wrap = s->mb_stride;
02124     xy = s->mb_x + s->mb_y * wrap;
02125     s->dc_val[1][xy] =
02126     s->dc_val[2][xy] = 1024;
02127     /* ac prediction */
02128     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02129     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02130 
02131     s->mbintra_table[xy]= 0;
02132 }
02133 
02134 /* Generic function called after a macroblock has been parsed by the
02135    decoder or encoded by the encoder.
02136 
02137    Important variables used:
02138    s->mb_intra : true if intra macroblock
02139    s->mv_dir   : motion vector direction
02140    s->mv_type  : motion vector type
02141    s->mv       : motion vector
02142    s->interlaced_dct : true if interlaced dct is used (mpeg2)
02143  */
02144 static av_always_inline
02145 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02146                             int lowres_flag, int is_mpeg12)
02147 {
02148     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02149     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02150         ff_xvmc_decode_mb(s);
02151         return;
02152     }
02153 
02154     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02155        /* save the DCT coefficients and dump them to the log */
02156        int i,j;
02157        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
02158        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02159        for(i=0; i<6; i++){
02160            for(j=0; j<64; j++){
02161                *dct++ = block[i][s->dsp.idct_permutation[j]];
02162                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02163            }
02164            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02165        }
02166     }
02167 
02168     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
02169 
02170     /* update DC predictors for P macroblocks */
02171     if (!s->mb_intra) {
02172         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02173             if(s->mbintra_table[mb_xy])
02174                 ff_clean_intra_table_entries(s);
02175         } else {
02176             s->last_dc[0] =
02177             s->last_dc[1] =
02178             s->last_dc[2] = 128 << s->intra_dc_precision;
02179         }
02180     }
02181     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02182         s->mbintra_table[mb_xy]=1;
02183 
02184     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { 
02185         uint8_t *dest_y, *dest_cb, *dest_cr;
02186         int dct_linesize, dct_offset;
02187         op_pixels_func (*op_pix)[4];
02188         qpel_mc_func (*op_qpix)[16];
02189         const int linesize   = s->current_picture.f.linesize[0]; /* not s->linesize as that would be wrong for field pictures */
02190         const int uvlinesize = s->current_picture.f.linesize[1];
02191         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02192         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02193 
02194         /* avoid copying the block if the macroblock was skipped in the last frame too */
02195         /* only tracked while decoding, as the encoder may reuse these buffers */
02196         if(!s->encoding){
02197             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02198             const int age = s->current_picture.f.age;
02199 
02200             assert(age);
02201 
02202             if (s->mb_skipped) {
02203                 s->mb_skipped= 0;
02204                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02205 
02206                 (*mbskip_ptr) ++; 
02207                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02208 
02209                 /* if the previous frame skipped it too, there is nothing more to do */
02210                 if (*mbskip_ptr >= age && s->current_picture.f.reference){
02211                     return;
02212                 }
02213             } else if(!s->current_picture.f.reference) {
02214                 (*mbskip_ptr) ++; 
02215                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02216             } else{
02217                 *mbskip_ptr = 0; 
02218             }
02219         }
02220 
02221         dct_linesize = linesize << s->interlaced_dct;
02222         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
02223 
02224         if(readable){
02225             dest_y=  s->dest[0];
02226             dest_cb= s->dest[1];
02227             dest_cr= s->dest[2];
02228         }else{
02229             dest_y = s->b_scratchpad;
02230             dest_cb= s->b_scratchpad+16*linesize;
02231             dest_cr= s->b_scratchpad+32*linesize;
02232         }
02233 
02234         if (!s->mb_intra) {
02235             /* motion compensation */
02236             /* only during decoding (MC was already done at encode time otherwise) */
02237             if(!s->encoding){
02238 
02239                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02240                     if (s->mv_dir & MV_DIR_FORWARD) {
02241                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02242                     }
02243                     if (s->mv_dir & MV_DIR_BACKWARD) {
02244                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02245                     }
02246                 }
02247 
02248                 if(lowres_flag){
02249                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02250 
02251                     if (s->mv_dir & MV_DIR_FORWARD) {
02252                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
02253                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02254                     }
02255                     if (s->mv_dir & MV_DIR_BACKWARD) {
02256                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
02257                     }
02258                 }else{
02259                     op_qpix= s->me.qpel_put;
02260                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02261                         op_pix = s->dsp.put_pixels_tab;
02262                     }else{
02263                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02264                     }
02265                     if (s->mv_dir & MV_DIR_FORWARD) {
02266                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
02267                         op_pix = s->dsp.avg_pixels_tab;
02268                         op_qpix= s->me.qpel_avg;
02269                     }
02270                     if (s->mv_dir & MV_DIR_BACKWARD) {
02271                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
02272                     }
02273                 }
02274             }
02275 
02276             /* skip dequant/idct if requested via skip_idct */
02277             if(s->avctx->skip_idct){
02278                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02279                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02280                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02281                     goto skip_idct;
02282             }
02283 
02284             /* add the dct residue */
02285             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02286                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02287                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02288                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02289                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02290                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02291 
02292                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02293                     if (s->chroma_y_shift){
02294                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02295                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02296                     }else{
02297                         dct_linesize >>= 1;
02298                         dct_offset >>=1;
02299                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02300                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02301                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02302                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02303                     }
02304                 }
02305             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02306                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02307                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02308                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02309                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02310 
02311                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02312                     if(s->chroma_y_shift){
02313                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02314                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02315                     }else{
02316                         /* 4:2:2 or 4:4:4 chroma */
02317                         dct_linesize = uvlinesize << s->interlaced_dct;
02318                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
02319 
02320                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02321                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02322                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02323                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02324                         if(!s->chroma_x_shift){
02325                             add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
02326                             add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
02327                             add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
02328                             add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
02329                         }
02330                     }
02331                 }
02332             }
02333             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02334                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02335             }
02336         } else {
02337             /* intra block: dct only */
02338             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02339                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02340                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02341                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02342                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02343 
02344                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02345                     if(s->chroma_y_shift){
02346                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02347                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02348                     }else{
02349                         dct_offset >>=1;
02350                         dct_linesize >>=1;
02351                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02352                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02353                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02354                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02355                     }
02356                 }
02357             }else{
02358                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02359                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02360                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02361                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02362 
02363                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02364                     if(s->chroma_y_shift){
02365                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02366                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02367                     }else{
02368 
02369                         dct_linesize = uvlinesize << s->interlaced_dct;
02370                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
02371 
02372                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02373                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02374                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02375                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02376                         if(!s->chroma_x_shift){
02377                             s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
02378                             s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
02379                             s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
02380                             s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
02381                         }
02382                     }
02383                 }
02384             }
02385         }
02386 skip_idct:
02387         if(!readable){
02388             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02389             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02390             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02391         }
02392     }
02393 }
02394 
02395 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02396 #if !CONFIG_SMALL
02397     if(s->out_format == FMT_MPEG1) {
02398         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02399         else                 MPV_decode_mb_internal(s, block, 0, 1);
02400     } else
02401 #endif
02402     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02403     else                  MPV_decode_mb_internal(s, block, 0, 0);
02404 }
02405 
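02406 /**
02407  * Call the draw_horiz_band callback and draw the picture edges if needed.
02408  * @param h normal height; it is reduced automatically if needed for the last row
02409  */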
02410 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02411     const int field_pic= s->picture_structure != PICT_FRAME;
02412     if(field_pic){
02413         h <<= 1;
02414         y <<= 1;
02415     }
02416 
02417     if (!s->avctx->hwaccel
02418        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02419        && s->unrestricted_mv
02420        && s->current_picture.f.reference
02421        && !s->intra_only
02422        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02423         int sides = 0, edge_h;
02424         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02425         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02426         if (y==0) sides |= EDGE_TOP;
02427         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02428 
02429         edge_h= FFMIN(h, s->v_edge_pos - y);
02430 
02431         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
02432                           s->linesize,           s->h_edge_pos,         edge_h,
02433                           EDGE_WIDTH,            EDGE_WIDTH,            sides);
02434         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
02435                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02436                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02437         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
02438                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
02439                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
02440     }
02441 
02442     h= FFMIN(h, s->avctx->height - y);
02443 
02444     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02445 
02446     if (s->avctx->draw_horiz_band) {
02447         AVFrame *src;
02448         int offset[AV_NUM_DATA_POINTERS];
02449         int i;
02450 
02451         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02452             src= (AVFrame*)s->current_picture_ptr;
02453         else if(s->last_picture_ptr)
02454             src= (AVFrame*)s->last_picture_ptr;
02455         else
02456             return;
02457 
02458         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02459             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
02460                 offset[i] = 0;
02461         }else{
02462             offset[0]= y * s->linesize;
02463             offset[1]=
02464             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02465             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
02466                 offset[i] = 0;
02467         }
02468 
02469         emms_c();
02470 
02471         s->avctx->draw_horiz_band(s->avctx, src, offset,
02472                                   y, s->picture_structure, h);
02473     }
02474 }
02475 
02476 void ff_init_block_index(MpegEncContext *s){ 
02477     const int linesize   = s->current_picture.f.linesize[0]; /* not s->linesize as that would be wrong for field pictures */
02478     const int uvlinesize = s->current_picture.f.linesize[1];
02479     const int mb_size= 4 - s->avctx->lowres;
02480 
02481     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02482     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02483     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02484     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02485     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02486     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02487     /* note: block_index is not used by MPEG-2, so it is not affected by chroma_format */
02488 
02489     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
02490     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02491     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02492 
02493     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02494     {
02495         if(s->picture_structure==PICT_FRAME){
02496         s->dest[0] += s->mb_y *   linesize << mb_size;
02497         s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02498         s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02499         }else{
02500             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02501             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02502             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02503             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02504         }
02505     }
02506 }
02507 
02508 void ff_mpeg_flush(AVCodecContext *avctx){
02509     int i;
02510     MpegEncContext *s = avctx->priv_data;
02511 
02512     if(s==NULL || s->picture==NULL)
02513         return;
02514 
02515     for(i=0; i<s->picture_count; i++){
02516        if (s->picture[i].f.data[0] &&
02517            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
02518             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
02519         free_frame_buffer(s, &s->picture[i]);
02520     }
02521     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02522 
02523     s->mb_x= s->mb_y= 0;
02524     s->closed_gop= 0;
02525 
02526     s->parse_context.state= -1;
02527     s->parse_context.frame_start_found= 0;
02528     s->parse_context.overread= 0;
02529     s->parse_context.overread_index= 0;
02530     s->parse_context.index= 0;
02531     s->parse_context.last_index= 0;
02532     s->bitstream_buffer_size=0;
02533     s->pp_time=0;
02534 }
02535 
02536 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02537                                    DCTELEM *block, int n, int qscale)
02538 {
02539     int i, level, nCoeffs;
02540     const uint16_t *quant_matrix;
02541 
02542     nCoeffs= s->block_last_index[n];
02543 
02544     if (n < 4)
02545         block[0] = block[0] * s->y_dc_scale;
02546     else
02547         block[0] = block[0] * s->c_dc_scale;
02548     /* dequantize the AC coefficients with the intra matrix */
02549     quant_matrix = s->intra_matrix;
02550     for(i=1;i<=nCoeffs;i++) {
02551         int j= s->intra_scantable.permutated[i];
02552         level = block[j];
02553         if (level) {
02554             if (level < 0) {
02555                 level = -level;
02556                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02557                 level = (level - 1) | 1;
02558                 level = -level;
02559             } else {
02560                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02561                 level = (level - 1) | 1;
02562             }
02563             block[j] = level;
02564         }
02565     }
02566 }
02567 
02568 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02569                                    DCTELEM *block, int n, int qscale)
02570 {
02571     int i, level, nCoeffs;
02572     const uint16_t *quant_matrix;
02573 
02574     nCoeffs= s->block_last_index[n];
02575 
02576     quant_matrix = s->inter_matrix;
02577     for(i=0; i<=nCoeffs; i++) {
02578         int j= s->intra_scantable.permutated[i];
02579         level = block[j];
02580         if (level) {
02581             if (level < 0) {
02582                 level = -level;
02583                 level = (((level << 1) + 1) * qscale *
02584                          ((int) (quant_matrix[j]))) >> 4;
02585                 level = (level - 1) | 1;
02586                 level = -level;
02587             } else {
02588                 level = (((level << 1) + 1) * qscale *
02589                          ((int) (quant_matrix[j]))) >> 4;
02590                 level = (level - 1) | 1;
02591             }
02592             block[j] = level;
02593         }
02594     }
02595 }
02596 
02597 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02598                                    DCTELEM *block, int n, int qscale)
02599 {
02600     int i, level, nCoeffs;
02601     const uint16_t *quant_matrix;
02602 
02603     if(s->alternate_scan) nCoeffs= 63;
02604     else nCoeffs= s->block_last_index[n];
02605 
02606     if (n < 4)
02607         block[0] = block[0] * s->y_dc_scale;
02608     else
02609         block[0] = block[0] * s->c_dc_scale;
02610     quant_matrix = s->intra_matrix;
02611     for(i=1;i<=nCoeffs;i++) {
02612         int j= s->intra_scantable.permutated[i];
02613         level = block[j];
02614         if (level) {
02615             if (level < 0) {
02616                 level = -level;
02617                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02618                 level = -level;
02619             } else {
02620                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02621             }
02622             block[j] = level;
02623         }
02624     }
02625 }
02626 
02627 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02628                                    DCTELEM *block, int n, int qscale)
02629 {
02630     int i, level, nCoeffs;
02631     const uint16_t *quant_matrix;
02632     int sum=-1;
02633 
02634     if(s->alternate_scan) nCoeffs= 63;
02635     else nCoeffs= s->block_last_index[n];
02636 
02637     if (n < 4)
02638         block[0] = block[0] * s->y_dc_scale;
02639     else
02640         block[0] = block[0] * s->c_dc_scale;
02641     quant_matrix = s->intra_matrix;
02642     for(i=1;i<=nCoeffs;i++) {
02643         int j= s->intra_scantable.permutated[i];
02644         level = block[j];
02645         if (level) {
02646             if (level < 0) {
02647                 level = -level;
02648                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02649                 level = -level;
02650             } else {
02651                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02652             }
02653             block[j] = level;
02654             sum+=level;
02655         }
02656     }
02657     block[63]^=sum&1;
02658 }
02659 
02660 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02661                                    DCTELEM *block, int n, int qscale)
02662 {
02663     int i, level, nCoeffs;
02664     const uint16_t *quant_matrix;
02665     int sum=-1;
02666 
02667     if(s->alternate_scan) nCoeffs= 63;
02668     else nCoeffs= s->block_last_index[n];
02669 
02670     quant_matrix = s->inter_matrix;
02671     for(i=0; i<=nCoeffs; i++) {
02672         int j= s->intra_scantable.permutated[i];
02673         level = block[j];
02674         if (level) {
02675             if (level < 0) {
02676                 level = -level;
02677                 level = (((level << 1) + 1) * qscale *
02678                          ((int) (quant_matrix[j]))) >> 4;
02679                 level = -level;
02680             } else {
02681                 level = (((level << 1) + 1) * qscale *
02682                          ((int) (quant_matrix[j]))) >> 4;
02683             }
02684             block[j] = level;
02685             sum+=level;
02686         }
02687     }
02688     block[63]^=sum&1;
02689 }
02690 
02691 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02692                                   DCTELEM *block, int n, int qscale)
02693 {
02694     int i, level, qmul, qadd;
02695     int nCoeffs;
02696 
02697     assert(s->block_last_index[n]>=0);
02698 
02699     qmul = qscale << 1;
02700 
02701     if (!s->h263_aic) {
02702         if (n < 4)
02703             block[0] = block[0] * s->y_dc_scale;
02704         else
02705             block[0] = block[0] * s->c_dc_scale;
02706         qadd = (qscale - 1) | 1;
02707     }else{
02708         qadd = 0;
02709     }
02710     if(s->ac_pred)
02711         nCoeffs=63;
02712     else
02713         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02714 
02715     for(i=1; i<=nCoeffs; i++) {
02716         level = block[i];
02717         if (level) {
02718             if (level < 0) {
02719                 level = level * qmul - qadd;
02720             } else {
02721                 level = level * qmul + qadd;
02722             }
02723             block[i] = level;
02724         }
02725     }
02726 }
02727 
02728 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02729                                   DCTELEM *block, int n, int qscale)
02730 {
02731     int i, level, qmul, qadd;
02732     int nCoeffs;
02733 
02734     assert(s->block_last_index[n]>=0);
02735 
02736     qadd = (qscale - 1) | 1;
02737     qmul = qscale << 1;
02738 
02739     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02740 
02741     for(i=0; i<=nCoeffs; i++) {
02742         level = block[i];
02743         if (level) {
02744             if (level < 0) {
02745                 level = level * qmul - qadd;
02746             } else {
02747                 level = level * qmul + qadd;
02748             }
02749             block[i] = level;
02750         }
02751     }
02752 }
02753 
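02754 /**
02755  * set qscale and update qscale-dependent variables.
02756  */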
02757 void ff_set_qscale(MpegEncContext * s, int qscale)
02758 {
02759     if (qscale < 1)
02760         qscale = 1;
02761     else if (qscale > 31)
02762         qscale = 31;
02763 
02764     s->qscale = qscale;
02765     s->chroma_qscale= s->chroma_qscale_table[qscale];
02766 
02767     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02768     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02769 }
02770 
02771 void MPV_report_decode_progress(MpegEncContext *s)
02772 {
02773     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
02774         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02775 }