00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "avcodec.h"
00031 #include "dsputil.h"
00032 #include "mpegvideo.h"
00033 #include "mpegvideo_common.h"
00034 #include "mjpegenc.h"
00035 #include "msmpeg4.h"
00036 #include "faandct.h"
00037 #include "xvmc_internal.h"
00038 #include <limits.h>
00039
00040
00041
00042
/* Forward declarations of the reference C implementations of the
 * per-standard coefficient dequantizers.  ff_dct_common_init() installs
 * these as the defaults; platform-specific init code may override them. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
00057
00058
00059
00060
00061
00062
00063
00064
/* Default luma-qscale -> chroma-qscale mapping: the identity, i.e. chroma
 * uses the same quantizer as luma unless a codec installs its own table. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 ...
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
00069
/* DC coefficient scale table indexed by qscale: constant 8 for every
 * possible qscale value (MPEG-1 uses a fixed DC scale). */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  the same value (8) for all 128 qscale indices
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00077
/* PIX_FMT_NONE-terminated list for codecs that only support planar 4:2:0. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00082
/* Hardware-acceleration variant of the 4:2:0 format list; currently offers
 * only the software PIX_FMT_YUV420P entry. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00087
/**
 * Scan [p, end) for an MPEG-style 0x000001xx start code.
 *
 * @param p     current read position
 * @param end   one past the last readable byte
 * @param state sliding 32-bit history of the most recently read bytes;
 *              updated on return so the search can resume across buffer
 *              boundaries with the same state
 * @return pointer just past the byte that completed the start code, or
 *         end if no start code was found in this buffer
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* First feed up to 3 bytes one at a time: this catches a start code
     * that straddles the previous buffer (completed via *state). */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Main scan: step 1-3 bytes at a time depending on which trailing
     * bytes rule out a 00 00 01 pattern ending at p. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    /* Re-derive the 32-bit state from the last 4 bytes before returning. */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
00117
00118
/**
 * Initialize the DCT-related function pointers and scan tables of the
 * context: install the C dequantizers, let platform-specific code override
 * them, and build scan tables permuted for the selected IDCT.
 *
 * @return 0 (always succeeds)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* The bitexact variant is used to match the reference decoder exactly. */
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Give the platform-specific code a chance to install optimized
     * implementations (and possibly a different idct_permutation). */
#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* Scan tables must be permuted to match the coefficient order expected
     * by the IDCT selected above. */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00161
/* Shallow-copy a Picture and mark the destination as a copy so it is not
 * treated as the owner of the underlying buffers. */
void ff_copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}
00166
/**
 * Allocate a Picture's frame data (unless shared) and its side-data tables.
 *
 * @param shared if non-zero, pic->data[] must already point to valid frame
 *               data supplied by the caller; only side tables are allocated
 * @return 0 on success, -1 on failure (any buffer obtained from
 *         get_buffer() is released again)
 *
 * NOTE: CHECKED_ALLOCZ is presumed to jump to the "fail" label on
 * allocation failure — macro defined elsewhere, confirm there.
 */
int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;
    int r= -1;

    if(shared){
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        /* The line sizes must stay constant for the whole stream. */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        s->linesize  = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    /* Side tables are only allocated once per Picture slot (qscale_table
     * doubles as the "already allocated" marker). */
    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
        if(s->out_format == FMT_H264){
            /* H.264 stores motion vectors at 4x4-block granularity. */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            /* Other codecs use 8x8-block granularity. */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    /* Maintain the picture-type history and bump pic->age to INT_MAX when
     * the frame at that age was a B frame — presumably so such frames are
     * never used for the age-based skip optimization; verify against users
     * of pic->age. */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
        pic->age= INT_MAX;

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    if(r>=0)
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    return -1;
}
00255
00259 static void free_picture(MpegEncContext *s, Picture *pic){
00260 int i;
00261
00262 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00263 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
00264 }
00265
00266 av_freep(&pic->mb_var);
00267 av_freep(&pic->mc_mb_var);
00268 av_freep(&pic->mb_mean);
00269 av_freep(&pic->mbskip_table);
00270 av_freep(&pic->qscale_table);
00271 av_freep(&pic->mb_type_base);
00272 av_freep(&pic->dct_coeff);
00273 av_freep(&pic->pan_scan);
00274 pic->mb_type= NULL;
00275 for(i=0; i<2; i++){
00276 av_freep(&pic->motion_val_base[i]);
00277 av_freep(&pic->ref_index[i]);
00278 }
00279
00280 if(pic->type == FF_BUFFER_TYPE_SHARED){
00281 for(i=0; i<4; i++){
00282 pic->base[i]=
00283 pic->data[i]= NULL;
00284 }
00285 pic->type= 0;
00286 }
00287 }
00288
/**
 * Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context.
 *
 * @param base  unused here; kept for the duplicate-context call convention
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    /* Edge emulation buffer; edge_emu_buffer points into the middle of the
     * allocation so code may address it with negative offsets. */
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    /* One shared scratchpad, aliased for several different uses. */
    CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t))
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        /* Motion-estimation maps and optional noise-reduction accumulator. */
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
00319
00320 static void free_duplicate_context(MpegEncContext *s){
00321 if(s==NULL) return;
00322
00323 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
00324 av_freep(&s->me.scratchpad);
00325 s->me.temp=
00326 s->rd_scratchpad=
00327 s->b_scratchpad=
00328 s->obmc_scratchpad= NULL;
00329
00330 av_freep(&s->dct_error_sum);
00331 av_freep(&s->me.map);
00332 av_freep(&s->me.score_map);
00333 av_freep(&s->blocks);
00334 s->block= NULL;
00335 }
00336
/**
 * Copy only the per-thread ("duplicate") fields from src to bak, so a full
 * memcpy of the whole context can be undone for those fields by
 * ff_update_duplicate_context().  Keep this list in sync with
 * init_duplicate_context().
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
#undef COPY
}
00359
/**
 * Overwrite dst with src while preserving dst's per-thread buffers and
 * state: the duplicate fields are backed up, the whole struct is copied,
 * and the backup is restored.  pblocks must be rebuilt because it points
 * into dst's own block array.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
//START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for(i=0;i<12;i++){
        dst->pblocks[i] = &dst->block[i];
    }
//STOP_TIMER("update_duplicate_context") //about 10k cycles per frame on 1ghz when synced //FIXME
}
00373
00378 void MPV_common_defaults(MpegEncContext *s){
00379 s->y_dc_scale_table=
00380 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00381 s->chroma_qscale_table= ff_default_chroma_qscale_table;
00382 s->progressive_frame= 1;
00383 s->progressive_sequence= 1;
00384 s->picture_structure= PICT_FRAME;
00385
00386 s->coded_picture_number = 0;
00387 s->picture_number = 0;
00388 s->input_picture_number = 0;
00389
00390 s->picture_in_gop_number = 0;
00391
00392 s->f_code = 1;
00393 s->b_code = 1;
00394 }
00395
/**
 * Set decoder-specific defaults; currently identical to the shared
 * encoder/decoder defaults.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
00403
/**
 * Initialize the common (encoder/decoder) parts of MpegEncContext:
 * derived geometry, DSP functions, all macroblock-level tables and the
 * per-thread duplicate contexts.
 *
 * Requires s->width, s->height and s->avctx to be set by the caller.
 * @return 0 on success, -1 on failure (everything allocated so far is
 *         released via MPV_common_end()).
 *
 * NOTE: CHECKED_ALLOCZ is presumed to jump to "fail" on allocation
 * failure — macro defined elsewhere, confirm there.
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    s->mb_height = (s->height + 15) / 16;

    if(s->avctx->pix_fmt == PIX_FMT_NONE){
        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* More threads than macroblock rows cannot be used (slice threading). */
    if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* Derived macroblock / block geometry.  The strides have one extra
     * column so edge macroblocks can be addressed without bounds checks. */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* Chroma subsampling factors for the chosen pixel format. */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* Positions of the edges for motion clipping. */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* Normalize fourccs to upper case so comparisons elsewhere are simpler. */
    s->codec_tag= toupper( s->avctx->codec_tag     &0xFF)
               + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
               + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
               + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    s->stream_codec_tag= toupper( s->avctx->stream_codec_tag     &0xFF)
                      + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
                      + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
                      + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* mb_index2xy maps a raster macroblock index to its position in the
     * (stride-padded) macroblock arrays. */
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* Motion-vector tables, offset so (0,0) is inside the padding. */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        }
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Per-macroblock encoder state. */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t))

        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))

        /* Quantizer matrices for all 32 qscale values, plus 16-bit copies. */
        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))

        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
        }
    }
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* Interlaced (field) motion-vector tables: [direction][field][dir]. */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
                    s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
                }
                CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
                CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
                s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
            }
            CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
        }
    }
    if (s->out_format == FMT_H263) {
        /* AC coefficient prediction values, one slot per 8x8 block. */
        CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

        /* Coded-block flags for intra-AC prediction. */
        CHECKED_ALLOCZ(s->coded_block_base, y_size);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* CBP and prediction-direction tables used by error concealment. */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* DC prediction values, initialized to the mid-range value 1024. */
        CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* All macroblocks start out flagged intra so prediction state is reset. */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);

    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* Thread context 0 is the main context itself; the others are copies
     * that get their own scratch buffers below. */
    s->thread_context[0]= s;
    threads = s->avctx->thread_count;

    for(i=1; i<threads; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    for(i=0; i<threads; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        /* Split the macroblock rows evenly (rounded) across the threads. */
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
00603
00604
/**
 * Free everything allocated by MPV_common_init() (and by later picture
 * allocations).  Safe to call on a partially initialized context: all
 * pointers freed here are either valid or NULL.
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* Thread context 0 is the main context; only contexts 1..n-1 were
     * separately malloc'ed. */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* Encoder-side motion-vector tables; clear the derived pointers that
     * aliased into the freed base arrays. */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    /* Prediction and error-concealment tables. */
    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
00687
/**
 * Build the derived lookup tables of an RLTable (max_level, max_run,
 * index_run) for both the "not last" and "last" coefficient groups.
 *
 * @param static_store if non-NULL, storage for the derived tables; when
 *        given and the table was already initialized (max_level[0] set),
 *        the function returns immediately, otherwise the tables are
 *        heap-allocated with av_malloc().
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, check if it was already initialized. */
    if(static_store && rl->max_level[0])
        return;

    /* Compute max_level[], max_run[] and index_run[] for the two groups:
     * last==0 covers entries [0, rl->last), last==1 covers [rl->last, rl->n). */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);  /* rl->n marks "no entry" */
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)   /* first entry for this run wins */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* Place the three tables either in the caller's static storage
         * (packed back to back) or in freshly allocated buffers. */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
00738
/**
 * Pre-expand the RL VLC into rl_vlc[q]: for each of the 32 qscale values,
 * store (len, level, run) per VLC table entry with the H.263-style
 * dequantization (level*qmul + qadd) already folded into the level.
 */
void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        int qmul= q*2;
        int qadd= (q-1)|1;

        /* q==0 means "no dequantization": identity mapping. */
        if(q==0){
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ //illegal code
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ //more bits needed
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ //esc
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192;   /* flag "last" entries */
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
00778
00779 int ff_find_unused_picture(MpegEncContext *s, int shared){
00780 int i;
00781
00782 if(shared){
00783 for(i=0; i<MAX_PICTURE_COUNT; i++){
00784 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00785 }
00786 }else{
00787 for(i=0; i<MAX_PICTURE_COUNT; i++){
00788 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i;
00789 }
00790 for(i=0; i<MAX_PICTURE_COUNT; i++){
00791 if(s->picture[i].data[0]==NULL) return i;
00792 }
00793 }
00794
00795 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00796
00797
00798
00799
00800
00801
00802
00803
00804
00805
00806
00807 abort();
00808 return -1;
00809 }
00810
/**
 * Recompute the per-coefficient DCT offsets used for noise reduction from
 * the accumulated error statistics; halves the accumulators once the
 * sample count exceeds 2^16 so recent frames dominate.
 */
static void update_noise_reduction(MpegEncContext *s){
    int intra, i;

    for(intra=0; intra<2; intra++){
        /* Decay the statistics to keep the accumulators bounded. */
        if(s->dct_count[intra] > (1<<16)){
            for(i=0; i<64; i++){
                s->dct_error_sum[intra][i] >>=1;
            }
            s->dct_count[intra] >>= 1;
        }

        /* offset = strength * count / error_sum, rounded; +1 avoids /0. */
        for(i=0; i<64; i++){
            s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
        }
    }
}
00827
/**
 * Prepare the context for (de)coding the next frame: release no longer
 * needed reference frames, obtain and set up current_picture, rotate the
 * last/next reference pointers and select the dequantizers.
 *
 * @return 0 on success, negative on failure
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* The old last_picture is no longer needed once it stops being a
     * reference (H.264 manages its own references, except SVQ3). */
    if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
      if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        /* Release leaked reference frames that are neither last nor next
         * (can happen after stream errors). */
        if(!s->encoding){
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
                }
            }
        }
      }
    }
alloc:
    if(!s->encoding){
        /* Release non-reference frames so their slots can be reused. */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }

        /* Reuse an already chosen but still empty current picture, or pick
         * a fresh unused slot. */
        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= (AVFrame*)s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= (AVFrame*)&s->picture[i];
        }

        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != FF_B_TYPE)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if( alloc_picture(s, (Picture*)pic, 0) < 0)
            return -1;

        s->current_picture_ptr= (Picture*)pic;
        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;

    s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* Rotate the reference pointers on non-B frames. */
    if (s->pict_type != FF_B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* Stream starts with a non-keyframe and we have no usable reference:
     * go back and allocate a (gray) placeholder as last picture. */
    if(s->pict_type != FF_I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable && s->codec_id != CODEC_ID_H264){
        av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
        assert(s->pict_type != FF_B_TYPE); //these should have been dropped if we don't have a reference
        goto alloc;
    }

    assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* Field pictures: address the selected field by offsetting the data
     * pointers and doubling the line sizes. */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_recognition= avctx->error_recognition;

    /* Select the dequantizers matching the coded standard. */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
00958
00959
/**
 * Finish (de)coding the current frame: draw the edge padding needed for
 * unrestricted motion vectors, update picture-type history and, on the
 * encoder side, release non-reference frames.
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;

    /* Draw edges for unrestricted MV prediction — but not when a hardware
     * accelerator owns the frame data. */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if(!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            s->dsp.draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c();

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=FF_B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* Release non-reference frames. */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
01011
/**
 * Draw an antialiased line from (sx,sy) to (ex,ey) into a grayscale plane
 * by ADDING color, weighted per pixel with 16.16 fixed-point arithmetic.
 *
 * @param buf    destination plane
 * @param w,h    clipping rectangle (coordinates clipped to [0,w-1]/[0,h-1])
 * @param stride line size of buf in bytes
 * @param color  intensity to distribute across the line's pixels
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    /* Step along the major axis; split color between the two pixels
     * adjacent to the exact position on the minor axis. */
    if(FFABS(ex - sx) > FFABS(ey - sy)){
        if(sx > ex){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;      /* slope in 16.16 fixed point */
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;     /* fractional part = blend weight */
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{
        if(sy > ey){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;              /* degenerate: single point */
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}
01060
01068 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01069 int dx,dy;
01070
01071 sx= av_clip(sx, -100, w+100);
01072 sy= av_clip(sy, -100, h+100);
01073 ex= av_clip(ex, -100, w+100);
01074 ey= av_clip(ey, -100, h+100);
01075
01076 dx= ex - sx;
01077 dy= ey - sy;
01078
01079 if(dx*dx + dy*dy > 3*3){
01080 int rx= dx + dy;
01081 int ry= -dx + dy;
01082 int length= ff_sqrt((rx*rx + ry*ry)<<8);
01083
01084
01085 rx= ROUNDED_DIV(rx*3<<4, length);
01086 ry= ROUNDED_DIV(ry*3<<4, length);
01087
01088 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01089 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01090 }
01091 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01092 }
01093
/**
 * Print per-macroblock debug info (skip count, QP, MB type) to the log and,
 * for the visualization debug flags, draw motion vectors / QP / MB-type
 * overlays directly into a copy of the picture.
 *
 * Side effects in visualization mode: pict->data[] is redirected to
 * s->visualization_buffer, pict->type becomes FF_BUFFER_TYPE_COPY, the
 * mbskip table is cleared, and s->low_delay is forced to 0
 * (NOTE(review): presumably so the overlay survives reordering until
 * display — confirm against the caller).
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(s->avctx->hwaccel) return;      /* pixels live on the accelerator; nothing to read or draw */
    if(!pict || !pict->mb_type) return;

    /* ---- textual per-macroblock dump ---- */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
        case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
        case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
        case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
        case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
        case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;   /* keep it a single digit */
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];

                    /* one character for the prediction kind */
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");   /* forward-only */
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");   /* backward-only */
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");   /* bidirectional */
                    }

                    /* one character for the partitioning */
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* one character for interlacing (H.264 MBAFF only) */
                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }

            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* ---- visual overlays drawn into a copy of the frame ---- */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;            /* mv units -> full pel */
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        /* motion_val row stride in mv units; non-H264 tables carry one extra column */
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0;  /* NOTE(review): forces reordered output — presumably needed for the overlay, confirm */

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        /* copy the planes so we never scribble on a reference picture;
           note '*' binds tighter than '>>': chroma copies (linesize*height)>>shift bytes */
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift;   /* chroma rows per macroblock */

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                /* --- motion vector arrows --- */
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                        case 0: /* P-frame forward MVs */
                            if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                continue;
                            direction = 0;
                            break;
                        case 1: /* B-frame forward MVs */
                            if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                            direction = 0;
                            break;
                        case 2: /* B-frame backward MVs */
                            if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                            direction = 1;
                            break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        if(IS_8X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<4; i++){           /* one arrow per 8x8 quadrant */
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){           /* one arrow per 16x8 half */
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;                 /* field vectors are in field lines */

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){           /* one arrow per 8x16 half */
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                            /* single 16x16 vector, drawn from the MB center */
                            int sx= mb_x*16 + 8;
                            int sy= mb_y*16 + 8;
                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* --- QP visualization: paint both chroma planes with a gray level --- */
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                /* --- MB-type visualization: color the chroma by type, mark partitions in luma --- */
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a U/V pair on a hue circle of radius r at angle theta (degrees) */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));

                    u=v=128;   /* default: neutral gray (skip/direct-skip keep it) */
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
                        /* intentionally left neutral */
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
                        /* intentionally left neutral */
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    /* replicate the byte across a whole 8-byte row write */
                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
                    }

                    /* XOR luma to mark partition boundaries */
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        /* horizontal split line */
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        /* vertical split line */
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        /* sub-8x8 splits: mark where neighboring vectors differ */
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            /* NOTE(review): reads both mv components as one int32 for a cheap compare */
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        /* no marking for MBAFF interlacing (intentionally empty) */
                    }
                }
                s->mbskip_table[mb_index]=0;   /* overlay invalidated the skip optimization */
            }
        }
    }
}
01356
/**
 * Half-pel motion compensation of one block in lowres mode.
 * All positions and sizes are in the lowres coordinate system
 * (full resolution shifted right by s->avctx->lowres).
 *
 * @param field_based  1 when src is field-organized (doubles the effective v range)
 * @param field_select which source field to read (adds one line when set)
 * @param w,h          block width/height in lowres pixels
 * @return 1 if edge emulation was performed, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int s_mask= (2<<lowres)-1;   /* sub-pel bits kept at this lowres level */
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        /* reduce qpel vectors to hpel precision */
        motion_x/=2;
        motion_y/=2;
    }

    sx= motion_x & s_mask;             /* sub-pel fraction */
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);   /* integer part, in lowres pixels */
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    /* unsigned compare also catches negative src_x/src_y */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        /* block reaches outside the decoded area: replicate the edge pixels */
        ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    /* scale the fraction to the fixed range expected by the chroma mc funcs */
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    if(field_select)
        src += s->linesize;
    pix_op[lowres](dest, src, stride, h, sx, sy);
    return emu;
}
01398
01399
/**
 * Motion compensation of one luma block plus its chroma in lowres mode.
 *
 * @param field_based   1 when predicting fields from field-organized data
 *                      (doubles linesizes, halves the vertical MB origin)
 * @param bottom_field  1 when writing the bottom field of an interleaved frame
 * @param field_select  which source field to read from
 * @param ref_picture   planes of the reference picture
 * @param pix_op        chroma mc function table indexed by lowres level
 * @param h             luma block height in lowres lines
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;          /* 8x8 block size at this lowres level */
    const int s_mask= (2<<lowres)-1;       /* sub-pel bits kept at this level */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){
        /* reduce qpel vectors to hpel precision */
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        /* adjust for the vertical offset between the selected source field
           and the destination field at this lowres level */
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;                                       /* sub-pel fraction */
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        /* H.263: chroma keeps the halved vector, OR-ing in the luma hpel bit */
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){
        /* H.261: integer-pel only, chroma vector is luma/4 doubled back up */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y = s->mb_y*block_s + (my >> lowres);
    } else {
        /* MPEG-style 4:2:0: chroma vector is the luma vector halved */
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* unsigned compare also catches negative positions */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        /* block reaches outside the decoded area: replicate the edges */
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
            ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf+16;
        }
    }

    if(bottom_field){
        /* interleaved output: bottom field starts one frame line down */
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* scale the fraction to the fixed range the mc functions expect */
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    /* luma block is twice the chroma size, hence the lowres-1 table entry */
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx <<= 2 - lowres;
        uvsy <<= 2 - lowres;
        pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }

}
01495
01496 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
01497 uint8_t *dest_cb, uint8_t *dest_cr,
01498 uint8_t **ref_picture,
01499 h264_chroma_mc_func *pix_op,
01500 int mx, int my){
01501 const int lowres= s->avctx->lowres;
01502 const int block_s= 8>>lowres;
01503 const int s_mask= (2<<lowres)-1;
01504 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
01505 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
01506 int emu=0, src_x, src_y, offset, sx, sy;
01507 uint8_t *ptr;
01508
01509 if(s->quarter_sample){
01510 mx/=2;
01511 my/=2;
01512 }
01513
01514
01515
01516 mx= ff_h263_round_chroma(mx);
01517 my= ff_h263_round_chroma(my);
01518
01519 sx= mx & s_mask;
01520 sy= my & s_mask;
01521 src_x = s->mb_x*block_s + (mx >> (lowres+1));
01522 src_y = s->mb_y*block_s + (my >> (lowres+1));
01523
01524 offset = src_y * s->uvlinesize + src_x;
01525 ptr = ref_picture[1] + offset;
01526 if(s->flags&CODEC_FLAG_EMU_EDGE){
01527 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
01528 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
01529 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01530 ptr= s->edge_emu_buffer;
01531 emu=1;
01532 }
01533 }
01534 sx <<= 2 - lowres;
01535 sy <<= 2 - lowres;
01536 pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
01537
01538 ptr = ref_picture[2] + offset;
01539 if(emu){
01540 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
01541 ptr= s->edge_emu_buffer;
01542 }
01543 pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
01544 }
01545
/**
 * Motion compensation of a single macroblock in lowres mode, dispatching
 * on s->mv_type.
 *
 * @param dir          prediction direction: 0 = forward (last picture),
 *                     1 = backward (next picture)
 * @param ref_picture  planes of the reference picture
 * @param pix_op       chroma mc function table (put or avg)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;   /* 8x8 block size at this lowres level */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        /* single vector covers the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors, one per 8x8 quadrant */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                        ref_picture[0], 0, 0,
                        (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                        block_s, block_s, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1]);

            /* accumulate for the single chroma prediction below */
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: two field predictions interleaved in the output */
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s);
        } else {
            /* field picture: second field may reference the first field of
               the picture currently being decoded */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each predicting a 16x8 half of the macroblock */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            /* same-parity references come from ref_picture; the opposite
               field of the current picture is used otherwise */
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: each field is the average of two field predictions */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;   /* second pass averages in */
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s);

                /* after the first prediction, switch to averaging */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite-parity reference is in the current frame when
                   this is the second field */
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
01671
01672
01673 static inline void put_dct(MpegEncContext *s,
01674 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01675 {
01676 s->dct_unquantize_intra(s, block, i, qscale);
01677 s->dsp.idct_put (dest, line_size, block);
01678 }
01679
01680
01681 static inline void add_dct(MpegEncContext *s,
01682 DCTELEM *block, int i, uint8_t *dest, int line_size)
01683 {
01684 if (s->block_last_index[i] >= 0) {
01685 s->dsp.idct_add (dest, line_size, block);
01686 }
01687 }
01688
01689 static inline void add_dequant_dct(MpegEncContext *s,
01690 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01691 {
01692 if (s->block_last_index[i] >= 0) {
01693 s->dct_unquantize_inter(s, block, i, qscale);
01694
01695 s->dsp.idct_add (dest, line_size, block);
01696 }
01697 }
01698
01702 void ff_clean_intra_table_entries(MpegEncContext *s)
01703 {
01704 int wrap = s->b8_stride;
01705 int xy = s->block_index[0];
01706
01707 s->dc_val[0][xy ] =
01708 s->dc_val[0][xy + 1 ] =
01709 s->dc_val[0][xy + wrap] =
01710 s->dc_val[0][xy + 1 + wrap] = 1024;
01711
01712 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
01713 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01714 if (s->msmpeg4_version>=3) {
01715 s->coded_block[xy ] =
01716 s->coded_block[xy + 1 ] =
01717 s->coded_block[xy + wrap] =
01718 s->coded_block[xy + 1 + wrap] = 0;
01719 }
01720
01721 wrap = s->mb_stride;
01722 xy = s->mb_x + s->mb_y * wrap;
01723 s->dc_val[1][xy] =
01724 s->dc_val[2][xy] = 1024;
01725
01726 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01727 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01728
01729 s->mbintra_table[xy]= 0;
01730 }
01731
01732
01733
01734
01735
01736
01737
01738
01739
01740
01741
/**
 * Reconstruct one macroblock into the current picture: maintains the
 * intra-prediction and skipped-MB state, runs motion compensation, then
 * applies the dequant/IDCT residue.  lowres_flag and is_mpeg12 are
 * compile-time constants at each call site, so the compiler specializes
 * this function per codec family.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    int mb_x, mb_y;
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        /* hardware (XvMC) path performs the whole reconstruction itself */
        ff_xvmc_decode_mb(s);
        return;
    }

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save un-permuted DCT coefficients for debugging */
       int i,j;
       DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
       for(i=0; i<6; i++)
           for(j=0; j<64; j++)
               *dct++ = block[i][s->dsp.idct_permutation[j]];
    }

    s->current_picture.qscale_table[mb_xy]= s->qscale;

    /* update DC predictor state for non-intra macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);   /* previous MB here was intra */
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;   /* reset DC prediction */
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;   /* remember this MB was intra */

    /* reconstruction can be skipped while encoding non-reference frames
       unless PSNR stats or RD decisions need the pixels */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize= s->current_picture.linesize[0];
        const int uvlinesize= s->current_picture.linesize[1];
        const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* skipped-MB bookkeeping: if this MB has been skipped for at least
           'age' frames, its pixels are already correct and we can return */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age= s->current_picture.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=FF_I_TYPE);

                (*mbskip_ptr) ++;                       /* skipped again this frame */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;    /* saturate the counter */

                /* skipped long enough that the reference already holds the pixels */
                if (*mbskip_ptr >= age && s->current_picture.reference){
                    return;
                }
            } else if(!s->current_picture.reference){
                (*mbskip_ptr) ++;                       /* non-reference: keep counting */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0;                        /* coded MB resets the counter */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* destination may be displayed concurrently: reconstruct into
               scratch and copy at the end */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* --- inter MB: motion compensation, then add the residue --- */
            if(!s->encoding){
                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;   /* second direction averages in */
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;               /* second direction averages in */
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
                    }
                }
            }

            /* optionally skip the IDCT when we are behind or told to discard */
            if(s->hurry_up>1) goto skip_idct;
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add the residue; some codecs dequantize here, others already
               did it during entropy decoding */
            if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        /* 4:2:0: one 8x8 block per chroma plane */
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* chroma not vertically subsampled: two blocks per plane */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                /* blocks are already dequantized: just IDCT-add */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* chroma uses its own interlaced geometry here */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){
                            /* 4:4:4: four blocks per chroma plane */
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);   /* WMV2 has its own path */
            }
        } else {
            /* --- intra MB: write the blocks directly --- */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* MPEG-1/2 decode: blocks already dequantized, IDCT directly */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){
                            /* 4:4:4 */
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        if(!readable){
            /* copy the scratch reconstruction into the real destination */
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
01982
/* Dispatch to MPV_decode_mb_internal with compile-time-constant
   lowres/is_mpeg12 flags, so each combination is compiled as its own
   specialized function.  The MPEG-1/2 fast path is dropped when building
   with CONFIG_SMALL; the `else` then binds to the `if` after #endif. */
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
        else                 MPV_decode_mb_internal(s, block, 0, 1);
    } else
#endif
    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
    else                  MPV_decode_mb_internal(s, block, 0, 0);
}
01993
01998 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
01999 if (s->avctx->draw_horiz_band) {
02000 AVFrame *src;
02001 int offset[4];
02002
02003 if(s->picture_structure != PICT_FRAME){
02004 h <<= 1;
02005 y <<= 1;
02006 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02007 }
02008
02009 h= FFMIN(h, s->avctx->height - y);
02010
02011 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02012 src= (AVFrame*)s->current_picture_ptr;
02013 else if(s->last_picture_ptr)
02014 src= (AVFrame*)s->last_picture_ptr;
02015 else
02016 return;
02017
02018 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02019 offset[0]=
02020 offset[1]=
02021 offset[2]=
02022 offset[3]= 0;
02023 }else{
02024 offset[0]= y * s->linesize;
02025 offset[1]=
02026 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02027 offset[3]= 0;
02028 }
02029
02030 emms_c();
02031
02032 s->avctx->draw_horiz_band(s->avctx, src, offset,
02033 y, s->picture_structure, h);
02034 }
02035 }
02036
02037 void ff_init_block_index(MpegEncContext *s){
02038 const int linesize= s->current_picture.linesize[0];
02039 const int uvlinesize= s->current_picture.linesize[1];
02040 const int mb_size= 4 - s->avctx->lowres;
02041
02042 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
02043 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
02044 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02045 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02046 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02047 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02048
02049
02050 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02051 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02052 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02053
02054 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02055 {
02056 s->dest[0] += s->mb_y * linesize << mb_size;
02057 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02058 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02059 }
02060 }
02061
02062 void ff_mpeg_flush(AVCodecContext *avctx){
02063 int i;
02064 MpegEncContext *s = avctx->priv_data;
02065
02066 if(s==NULL || s->picture==NULL)
02067 return;
02068
02069 for(i=0; i<MAX_PICTURE_COUNT; i++){
02070 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02071 || s->picture[i].type == FF_BUFFER_TYPE_USER))
02072 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
02073 }
02074 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02075
02076 s->mb_x= s->mb_y= 0;
02077
02078 s->parse_context.state= -1;
02079 s->parse_context.frame_start_found= 0;
02080 s->parse_context.overread= 0;
02081 s->parse_context.overread_index= 0;
02082 s->parse_context.index= 0;
02083 s->parse_context.last_index= 0;
02084 s->bitstream_buffer_size=0;
02085 s->pp_time=0;
02086 }
02087
02088 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02089 DCTELEM *block, int n, int qscale)
02090 {
02091 int i, level, nCoeffs;
02092 const uint16_t *quant_matrix;
02093
02094 nCoeffs= s->block_last_index[n];
02095
02096 if (n < 4)
02097 block[0] = block[0] * s->y_dc_scale;
02098 else
02099 block[0] = block[0] * s->c_dc_scale;
02100
02101 quant_matrix = s->intra_matrix;
02102 for(i=1;i<=nCoeffs;i++) {
02103 int j= s->intra_scantable.permutated[i];
02104 level = block[j];
02105 if (level) {
02106 if (level < 0) {
02107 level = -level;
02108 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02109 level = (level - 1) | 1;
02110 level = -level;
02111 } else {
02112 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02113 level = (level - 1) | 1;
02114 }
02115 block[j] = level;
02116 }
02117 }
02118 }
02119
02120 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02121 DCTELEM *block, int n, int qscale)
02122 {
02123 int i, level, nCoeffs;
02124 const uint16_t *quant_matrix;
02125
02126 nCoeffs= s->block_last_index[n];
02127
02128 quant_matrix = s->inter_matrix;
02129 for(i=0; i<=nCoeffs; i++) {
02130 int j= s->intra_scantable.permutated[i];
02131 level = block[j];
02132 if (level) {
02133 if (level < 0) {
02134 level = -level;
02135 level = (((level << 1) + 1) * qscale *
02136 ((int) (quant_matrix[j]))) >> 4;
02137 level = (level - 1) | 1;
02138 level = -level;
02139 } else {
02140 level = (((level << 1) + 1) * qscale *
02141 ((int) (quant_matrix[j]))) >> 4;
02142 level = (level - 1) | 1;
02143 }
02144 block[j] = level;
02145 }
02146 }
02147 }
02148
02149 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02150 DCTELEM *block, int n, int qscale)
02151 {
02152 int i, level, nCoeffs;
02153 const uint16_t *quant_matrix;
02154
02155 if(s->alternate_scan) nCoeffs= 63;
02156 else nCoeffs= s->block_last_index[n];
02157
02158 if (n < 4)
02159 block[0] = block[0] * s->y_dc_scale;
02160 else
02161 block[0] = block[0] * s->c_dc_scale;
02162 quant_matrix = s->intra_matrix;
02163 for(i=1;i<=nCoeffs;i++) {
02164 int j= s->intra_scantable.permutated[i];
02165 level = block[j];
02166 if (level) {
02167 if (level < 0) {
02168 level = -level;
02169 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02170 level = -level;
02171 } else {
02172 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02173 }
02174 block[j] = level;
02175 }
02176 }
02177 }
02178
02179 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02180 DCTELEM *block, int n, int qscale)
02181 {
02182 int i, level, nCoeffs;
02183 const uint16_t *quant_matrix;
02184 int sum=-1;
02185
02186 if(s->alternate_scan) nCoeffs= 63;
02187 else nCoeffs= s->block_last_index[n];
02188
02189 if (n < 4)
02190 block[0] = block[0] * s->y_dc_scale;
02191 else
02192 block[0] = block[0] * s->c_dc_scale;
02193 quant_matrix = s->intra_matrix;
02194 for(i=1;i<=nCoeffs;i++) {
02195 int j= s->intra_scantable.permutated[i];
02196 level = block[j];
02197 if (level) {
02198 if (level < 0) {
02199 level = -level;
02200 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02201 level = -level;
02202 } else {
02203 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02204 }
02205 block[j] = level;
02206 sum+=level;
02207 }
02208 }
02209 block[63]^=sum&1;
02210 }
02211
02212 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02213 DCTELEM *block, int n, int qscale)
02214 {
02215 int i, level, nCoeffs;
02216 const uint16_t *quant_matrix;
02217 int sum=-1;
02218
02219 if(s->alternate_scan) nCoeffs= 63;
02220 else nCoeffs= s->block_last_index[n];
02221
02222 quant_matrix = s->inter_matrix;
02223 for(i=0; i<=nCoeffs; i++) {
02224 int j= s->intra_scantable.permutated[i];
02225 level = block[j];
02226 if (level) {
02227 if (level < 0) {
02228 level = -level;
02229 level = (((level << 1) + 1) * qscale *
02230 ((int) (quant_matrix[j]))) >> 4;
02231 level = -level;
02232 } else {
02233 level = (((level << 1) + 1) * qscale *
02234 ((int) (quant_matrix[j]))) >> 4;
02235 }
02236 block[j] = level;
02237 sum+=level;
02238 }
02239 }
02240 block[63]^=sum&1;
02241 }
02242
02243 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02244 DCTELEM *block, int n, int qscale)
02245 {
02246 int i, level, qmul, qadd;
02247 int nCoeffs;
02248
02249 assert(s->block_last_index[n]>=0);
02250
02251 qmul = qscale << 1;
02252
02253 if (!s->h263_aic) {
02254 if (n < 4)
02255 block[0] = block[0] * s->y_dc_scale;
02256 else
02257 block[0] = block[0] * s->c_dc_scale;
02258 qadd = (qscale - 1) | 1;
02259 }else{
02260 qadd = 0;
02261 }
02262 if(s->ac_pred)
02263 nCoeffs=63;
02264 else
02265 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02266
02267 for(i=1; i<=nCoeffs; i++) {
02268 level = block[i];
02269 if (level) {
02270 if (level < 0) {
02271 level = level * qmul - qadd;
02272 } else {
02273 level = level * qmul + qadd;
02274 }
02275 block[i] = level;
02276 }
02277 }
02278 }
02279
02280 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02281 DCTELEM *block, int n, int qscale)
02282 {
02283 int i, level, qmul, qadd;
02284 int nCoeffs;
02285
02286 assert(s->block_last_index[n]>=0);
02287
02288 qadd = (qscale - 1) | 1;
02289 qmul = qscale << 1;
02290
02291 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02292
02293 for(i=0; i<=nCoeffs; i++) {
02294 level = block[i];
02295 if (level) {
02296 if (level < 0) {
02297 level = level * qmul - qadd;
02298 } else {
02299 level = level * qmul + qadd;
02300 }
02301 block[i] = level;
02302 }
02303 }
02304 }
02305
02309 void ff_set_qscale(MpegEncContext * s, int qscale)
02310 {
02311 if (qscale < 1)
02312 qscale = 1;
02313 else if (qscale > 31)
02314 qscale = 31;
02315
02316 s->qscale = qscale;
02317 s->chroma_qscale= s->chroma_qscale_table[qscale];
02318
02319 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02320 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02321 }