00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043
00044
00045
00046
/* Forward declarations for the C reference dct_unquantize implementations.
 * ff_dct_common_init() installs these into the context; architecture-specific
 * init (MMX/ARM/AltiVec/...) may later override the function pointers. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
00061
00062
00063
00064
00065
00066
00067
00068
/* Default chroma qscale table: identity mapping (chroma quantizer equals
 * the luma quantizer), indexed by qscale 0..31. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
00073
/* MPEG-1 intra DC coefficient scale: constant 8 for every qscale (the
 * 128-entry size matches the other dc_scale tables so they can be swapped
 * via a single pointer). */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00081
/* MPEG-2 intra DC scale tables for intra_dc_precision 1..3: the DC scale is
 * 8 >> intra_dc_precision, i.e. 4, 2 and 1 respectively, constant across all
 * qscale values. */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  intra_dc_precision == 1 -> scale 4
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128]={
//  intra_dc_precision == 2 -> scale 2
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128]={
//  intra_dc_precision == 3 -> scale 1 (full precision)
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

/* Lookup by intra_dc_precision (0..3); precision 0 reuses the MPEG-1 table
 * (scale 8). */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00112
/* PIX_FMT_NONE-terminated pixel format lists for get_format(): plain
 * YUV 4:2:0, and the hwaccel variant that offers DXVA2/VA-API surfaces
 * before the software format. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00124
/**
 * Scan [p, end) for an MPEG start code prefix (00 00 01 xx).
 *
 * @param p     current read position
 * @param end   one past the last byte to examine
 * @param state in/out: big-endian rolling word of the last (up to) four
 *              bytes consumed; carrying it between calls lets a start code
 *              that straddles two buffers still be found
 * @return pointer just past the xx byte of the start code, with *state
 *         holding the full 00 00 01 xx word; or end if none was found
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* First feed up to 3 bytes through the carried-over state: tmp==0x100
     * means the previous three bytes (now shifted up) were 00 00 01, so the
     * byte just consumed completes a start code. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Fast scan: p[-3..-1] are the last three bytes read. Skip as far as the
     * byte values allow without being able to miss a 00 00 01 run. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3; // p[-1] cannot be part of 00 00 01
        else if(p[-2]          ) p+= 2; // p[-2] nonzero: prefix can start at p[-1] earliest
        else if(p[-3]|(p[-1]-1)) p++;   // not exactly 00 00 01 yet
        else{
            p++;                        // p[-3..-1] == 00 00 01; p now points at xx
            break;
        }
    }

    /* Reload state with the last four bytes ending at the match (or at end
     * when no start code was found). */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
00154
00155
/**
 * Initialize the DCT-related parts of the context: install the C reference
 * dct_unquantize functions, let the architecture-specific init override
 * them, and build the (IDCT-permuted) scan tables.
 * @return always 0
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* per-architecture overrides of the above function pointers */
#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* Scan tables must be permuted to match the IDCT's coefficient layout,
     * so they can only be built after the IDCT has been selected above. */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00198
/**
 * Shallow-copy src into dst (pointer fields still reference src's buffers)
 * and mark dst as a copy so it is not treated as the buffer owner.
 */
void ff_copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}
00203
/**
 * Release the frame buffer of pic back to the (possibly frame-threaded)
 * buffer allocator and free any hwaccel private data attached to it.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private);
}
00212
/**
 * Allocate a frame buffer for pic through the (possibly frame-threaded)
 * get_buffer path and validate the result.
 * @return 0 on success, -1 on failure (any hwaccel private data is freed)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    /* hwaccels may need per-picture private storage before get_buffer */
    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);

    /* a usable buffer must have data, a buffer type, and a nonzero age */
    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);
        return -1;
    }

    /* strides must stay constant for the whole stream */
    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* the code below assumes U and V share one stride */
    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00253
00258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00259 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1;
00260 const int mb_array_size= s->mb_stride*s->mb_height;
00261 const int b8_array_size= s->b8_stride*s->mb_height*2;
00262 const int b4_array_size= s->b4_stride*s->mb_height*4;
00263 int i;
00264 int r= -1;
00265
00266 if(shared){
00267 assert(pic->data[0]);
00268 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00269 pic->type= FF_BUFFER_TYPE_SHARED;
00270 }else{
00271 assert(!pic->data[0]);
00272
00273 if (alloc_frame_buffer(s, pic) < 0)
00274 return -1;
00275
00276 s->linesize = pic->linesize[0];
00277 s->uvlinesize= pic->linesize[1];
00278 }
00279
00280 if(pic->qscale_table==NULL){
00281 if (s->encoding) {
00282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
00283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
00284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
00285 }
00286
00287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail)
00288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
00289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00290 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00291 pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
00292 if(s->out_format == FMT_H264){
00293 for(i=0; i<2; i++){
00294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
00295 pic->motion_val[i]= pic->motion_val_base[i]+4;
00296 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00297 }
00298 pic->motion_subsample_log2= 2;
00299 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00300 for(i=0; i<2; i++){
00301 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00302 pic->motion_val[i]= pic->motion_val_base[i]+4;
00303 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00304 }
00305 pic->motion_subsample_log2= 3;
00306 }
00307 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00308 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00309 }
00310 pic->qstride= s->mb_stride;
00311 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00312 }
00313
00314
00315
00316 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00317 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
00318 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
00319 pic->age= INT_MAX;
00320 pic->owner2 = NULL;
00321
00322 return 0;
00323 fail:
00324 if(r>=0)
00325 free_frame_buffer(s, pic);
00326 return -1;
00327 }
00328
00332 static void free_picture(MpegEncContext *s, Picture *pic){
00333 int i;
00334
00335 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00336 free_frame_buffer(s, pic);
00337 }
00338
00339 av_freep(&pic->mb_var);
00340 av_freep(&pic->mc_mb_var);
00341 av_freep(&pic->mb_mean);
00342 av_freep(&pic->mbskip_table);
00343 av_freep(&pic->qscale_table_base);
00344 av_freep(&pic->mb_type_base);
00345 av_freep(&pic->dct_coeff);
00346 av_freep(&pic->pan_scan);
00347 pic->mb_type= NULL;
00348 for(i=0; i<2; i++){
00349 av_freep(&pic->motion_val_base[i]);
00350 av_freep(&pic->ref_index[i]);
00351 }
00352
00353 if(pic->type == FF_BUFFER_TYPE_SHARED){
00354 for(i=0; i<4; i++){
00355 pic->base[i]=
00356 pic->data[i]= NULL;
00357 }
00358 pic->type= 0;
00359 }
00360 }
00361
/**
 * Allocate the per-thread scratch buffers of an MpegEncContext (edge
 * emulation buffer, ME scratchpads, DCT blocks, H.263 AC prediction values).
 * @param base currently unused; s is the context being initialized
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emulation needs, for MV of *up to* 1 MB size, edges of up to 21 pixels
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2*2, fail);

    /* One shared scratch allocation: temp, rd and b scratchpads alias the
     * start of it, obmc_scratchpad starts 16 bytes in. They are never used
     * simultaneously in a conflicting way. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values: one plane for Y (y_size) followed by Cb and Cr */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through MPV_common_end()
}
00404
00405 static void free_duplicate_context(MpegEncContext *s){
00406 if(s==NULL) return;
00407
00408 av_freep(&s->edge_emu_buffer);
00409 av_freep(&s->me.scratchpad);
00410 s->me.temp=
00411 s->rd_scratchpad=
00412 s->b_scratchpad=
00413 s->obmc_scratchpad= NULL;
00414
00415 av_freep(&s->dct_error_sum);
00416 av_freep(&s->me.map);
00417 av_freep(&s->me.score_map);
00418 av_freep(&s->blocks);
00419 av_freep(&s->ac_val_base);
00420 s->block= NULL;
00421 }
00422
/**
 * Copy the per-thread fields (scratch buffers, block pointers, slice range,
 * bitstream writer, noise-reduction state) from src into bak. Used by
 * ff_update_duplicate_context() to preserve a thread context's private
 * state across a whole-struct memcpy.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00448
/**
 * Bring a slice-thread context dst up to date with src: copy the whole
 * struct, but save and restore dst's private per-thread fields around the
 * copy, then re-point pblocks into dst's own block array.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);     // stash dst's private fields
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);     // restore them over the copy
    for(i=0;i<12;i++){
        dst->pblocks[i] = &dst->block[i];    // must not point into src
    }
}
00462
/**
 * Frame-threading: update the decode context of dst from src after src has
 * finished decoding a frame. Copies picture state, reference pointers
 * (rebased into dst's picture array), the leftover bitstream buffer, and a
 * range of codec parameters.
 * @return 0 on success
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if(dst == src || !s1->context_initialized) return 0;

    /* first call: clone the whole context, then give dst its own picture
     * range and bitstream buffer */
    if(!s->context_initialized){
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->picture_range_start  += MAX_PICTURE_COUNT;
        s->picture_range_end    += MAX_PICTURE_COUNT;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        MPV_common_init(s);
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    /* NOTE: the ranged memcpys below depend on MpegEncContext's field
     * layout (copying everything between two members). */
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);

    /* reference pointers must point into s's own picture array */
    s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    /* Error/bug resilience */
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    /* MPEG-4 timing info (everything from time_increment_bits up to shape) */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);

    /* B-frame info */
    s->max_b_frames         = s1->max_b_frames;
    s->low_delay            = s1->low_delay;
    s->dropable             = s1->dropable;

    /* DivX packed-bitstream handling */
    s->divx_packed          = s1->divx_packed;

    if(s1->bitstream_buffer){
        if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            /* NOTE(review): sized from s1's *allocated* size — assumes it is
             * always >= bitstream_buffer_size + padding; confirm upstream */
            av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size  = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
        memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }

    /* MPEG-2 state (everything from progressive_sequence up to rtp_mode) */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);

    if(!s1->first_field){
        s->last_pict_type= s1->pict_type;
        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;

        if(s1->pict_type!=FF_B_TYPE){
            s->last_non_b_pict_type= s1->pict_type;
        }
    }

    return 0;
}
00537
00542 void MPV_common_defaults(MpegEncContext *s){
00543 s->y_dc_scale_table=
00544 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00545 s->chroma_qscale_table= ff_default_chroma_qscale_table;
00546 s->progressive_frame= 1;
00547 s->progressive_sequence= 1;
00548 s->picture_structure= PICT_FRAME;
00549
00550 s->coded_picture_number = 0;
00551 s->picture_number = 0;
00552 s->input_picture_number = 0;
00553
00554 s->picture_in_gop_number = 0;
00555
00556 s->f_code = 1;
00557 s->b_code = 1;
00558
00559 s->picture_range_start = 0;
00560 s->picture_range_end = MAX_PICTURE_COUNT;
00561 }
00562
/**
 * Set decoder-specific context defaults; currently identical to the common
 * defaults.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
00570
/**
 * Initialize an MpegEncContext: derive macroblock geometry from the frame
 * size, allocate all shared tables, and set up the per-thread contexts.
 * MPV_common_defaults() (and for encoders the encoder init) must have run
 * before this.
 * @return 0 on success, -1 on failure (context is torn down)
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    /* interlaced MPEG-2 needs an even number of MB rows */
    if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if(s->avctx->pix_fmt == PIX_FMT_NONE){
        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* slice threading cannot use more threads than MB rows */
    if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
       (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* macroblock geometry; the +1 strides leave guard columns */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* set default edge pos, will be overridden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    s->codec_tag = ff_toupper4(s->avctx->codec_tag);

    s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* mb index -> mb position in the stride-padded array */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail)
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* motion estimation MV tables (P and the five B variants) */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        /* per-MB encoder decisions */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        /* quantizer matrices, one set per qscale */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
        }
    }

    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
    for(i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode MV tables, per field/direction */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* coded-block pattern */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp and AC prediction direction tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* DC prediction values, initialized to the "no prediction" level */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table (+2 guard bytes) */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
    }

    s->context_initialized = 1;
    s->thread_context[0]= s;

    /* slice threading: one duplicate context per thread, each owning a
     * contiguous band of MB rows */
    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        threads = s->avctx->thread_count;

        for(i=1; i<threads; i++){
            s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for(i=0; i<threads; i++){
            if(init_duplicate_context(s->thread_context[i], s) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
            s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
        }
    } else {
        if(init_duplicate_context(s, s) < 0) goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
00774
00775
/**
 * Free everything allocated by MPV_common_init() and reset the context so
 * it can be re-initialized. Safe to call on a partially initialized context
 * (av_freep tolerates NULL pointers).
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* tear down the per-thread duplicate contexts first */
    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        for(i=0; i<s->avctx->thread_count; i++){
            free_duplicate_context(s->thread_context[i]);
        }
        for(i=1; i<s->avctx->thread_count; i++){
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* encoder MV tables: free the bases, clear the offset pointers */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-thread copies share pictures with the owner; don't double-free */
    if(s->picture && !s->avctx->is_copy){
        for(i=0; i<s->picture_count; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
00860
/**
 * Build the derived run/level lookup tables of an RLTable: for both the
 * "not last" and "last" halves of the VLC table, compute max_level[run],
 * max_run[level] and index_run[run] (index of the first code with that run).
 *
 * @param static_store if non-NULL, a static backing array that the three
 *        derived tables are packed into (then a second call is a no-op);
 *        otherwise the tables are av_malloc()ed
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, check if it has already been initialized */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); /* rl->n == "not found" sentinel */
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* pack the three tables into static_store, or allocate each */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
00911
/**
 * Precompute rl_vlc[q] for every qscale q: for each VLC table entry, store
 * the decoded run and the already-dequantized level (level*qmul+qadd) so the
 * decoder can skip the dequantization per coefficient.
 */
void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){
            /* q==0 is used for the "no dequantization" table */
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ /* illegal code */
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ /* more bits needed (sub-table link) */
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ /* esc */
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192; /* flag "last" codes */
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
00951
00952 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
00953 {
00954 int i;
00955
00956
00957 for(i=0; i<s->picture_count; i++){
00958 if(s->picture[i].data[0] && !s->picture[i].reference
00959 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
00960 && (remove_current || &s->picture[i] != s->current_picture_ptr)
00961 ){
00962 free_frame_buffer(s, &s->picture[i]);
00963 }
00964 }
00965 }
00966
00967 int ff_find_unused_picture(MpegEncContext *s, int shared){
00968 int i;
00969
00970 if(shared){
00971 for(i=s->picture_range_start; i<s->picture_range_end; i++){
00972 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00973 }
00974 }else{
00975 for(i=s->picture_range_start; i<s->picture_range_end; i++){
00976 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i;
00977 }
00978 for(i=s->picture_range_start; i<s->picture_range_end; i++){
00979 if(s->picture[i].data[0]==NULL) return i;
00980 }
00981 }
00982
00983 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00984
00985
00986
00987
00988
00989
00990
00991
00992
00993
00994
00995 abort();
00996 return -1;
00997 }
00998
00999 static void update_noise_reduction(MpegEncContext *s){
01000 int intra, i;
01001
01002 for(intra=0; intra<2; intra++){
01003 if(s->dct_count[intra] > (1<<16)){
01004 for(i=0; i<64; i++){
01005 s->dct_error_sum[intra][i] >>=1;
01006 }
01007 s->dct_count[intra] >>= 1;
01008 }
01009
01010 for(i=0; i<64; i++){
01011 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
01012 }
01013 }
01014 }
01015
/**
 * Generic function called after decoding the picture header and before
 * decoding the slices: releases obsolete reference pictures, allocates
 * the current picture, rotates last/next pointers and, if necessary,
 * allocates dummy reference pictures so prediction always has valid data.
 * @return 0 on success, -1 on allocation failure
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark & release old frames: the previous "last" picture is dead once a
     * new non-B picture arrives (B-pictures keep using both references) */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
            free_frame_buffer(s, s->last_picture_ptr);

            /* release forgotten ("zombie") pictures that are still marked as
             * references but are no longer reachable (decoding only) */
            if(!s->encoding){
                for(i=0; i<s->picture_count; i++){
                    if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                        av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                        free_frame_buffer(s, &s->picture[i]);
                    }
                }
            }
        }
    }

    if(!s->encoding){
        ff_release_unused_pictures(s, 1);

        /* reuse the current picture slot if its buffer was already released,
         * otherwise grab a fresh slot */
        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= s->current_picture_ptr;
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;  /* referenced as both fields */
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;

        /* for MPEG-1/2 field pictures, derive top_field_first from which
         * field of the frame is coded first */
        s->current_picture_ptr->top_field_first= s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
        s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->pict_type= s->pict_type;

    s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate references: the current picture becomes "next" unless it is a
     * B picture or droppable */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }

    if(s->codec_id != CODEC_ID_H264){
        /* allocate a dummy "last" picture when the stream starts on a
         * non-keyframe (or a field-based keyframe) so motion compensation
         * always has something to read from */
        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
           (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");

            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
            /* mark both fields finished so frame-threaded consumers never wait */
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
        }
        /* same for the "next" picture when a B picture has no backward ref */
        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* for field pictures, view the frame buffers as a single field by
     * doubling the linesizes (and offsetting for the bottom field) */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

#if FF_API_HURRY_UP
    s->hurry_up= s->avctx->hurry_up;
#endif
    s->error_recognition= avctx->error_recognition;

    /* select the matching dequantizer pair for this stream type */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01170
01171
/**
 * Generic function called after a frame has been coded/decoded:
 * draws the edge padding, updates last-picture bookkeeping and reports
 * decoding progress to frame-threading consumers.
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
       && !s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* replicate the picture borders so unrestricted MVs can read
         * outside the coded area; chroma planes use subsampled extents */
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
                          s->h_edge_pos , s->v_edge_pos,
                          EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
                          s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=AV_PICTURE_TYPE_B){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<s->picture_count; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference ){
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;

    /* tell waiting frame threads the whole picture is decoded */
    if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
    }
}
01237
/**
 * Draw an anti-aliased line from (sx, sy) to (ex, ey) by additively
 * blending into a grayscale buffer, using 16.16 fixed-point stepping
 * along the major axis.
 * @param w      width of the image
 * @param h      height of the image
 * @param stride linesize of the image
 * @param color  intensity added to the touched pixels
 */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    if(FFABS(ex - sx) > FFABS(ey - sy)){
        /* x-major: iterate over x, interpolate y in 16.16 fixed point */
        if(sx > ex){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;
            fr= (x*f)&0xFFFF;
            /* split the color between the two rows straddling the ideal line */
            buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
            buf[(y+1)*stride + x]+= (color* fr )>>16;
        }
    }else{
        /* y-major: iterate over y, interpolate x */
        if(sy > ey){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        if(ey) f= ((ex-sx)<<16)/ey;
        else f= 0;  /* degenerate: single point */
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
            buf[y*stride + x+1]+= (color* fr )>>16;
        }
    }
}
01286
01294 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01295 int dx,dy;
01296
01297 sx= av_clip(sx, -100, w+100);
01298 sy= av_clip(sy, -100, h+100);
01299 ex= av_clip(ex, -100, w+100);
01300 ey= av_clip(ey, -100, h+100);
01301
01302 dx= ex - sx;
01303 dy= ey - sy;
01304
01305 if(dx*dx + dy*dy > 3*3){
01306 int rx= dx + dy;
01307 int ry= -dx + dy;
01308 int length= ff_sqrt((rx*rx + ry*ry)<<8);
01309
01310
01311 rx= ROUNDED_DIV(rx*3<<4, length);
01312 ry= ROUNDED_DIV(ry*3<<4, length);
01313
01314 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01315 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01316 }
01317 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01318 }
01319
/**
 * Print debug info about the decoded frame (skip counts, QP, macroblock
 * types) and, when the visualization debug flags are set, draw motion
 * vectors and MB-type overlays directly into a copy of the picture.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
    if(s->avctx->hwaccel || !pict || !pict->mb_type) return;

    /* textual per-macroblock dump */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
               av_get_picture_type_char(pict->pict_type));
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;  /* keep it one digit wide */
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    /* first char: prediction type of the MB */
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: partitioning */
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: interlacing */
                    if(IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* graphical overlays drawn into a private copy of the frame */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0;  /* needed to see the vectors without trashing the buffers */

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        /* copy the planes into the visualization buffers so the overlays do
         * not modify decoder-owned memory */
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        pict->opaque= NULL;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift;

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    /* type 0: P forward, type 1: B forward, type 2: B backward */
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                        case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
                                    continue;
                                direction = 0;
                                break;
                        case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
                                    continue;
                                direction = 0;
                                break;
                        case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
                                    continue;
                                direction = 1;
                                break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        if(IS_8X8(pict->mb_type[mb_index])){
                            /* one arrow per 8x8 block */
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;  /* field MVs cover half the vertical range */

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2;

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                            /* single 16x16 arrow from the MB center */
                            int sx= mb_x*16 + 8;
                            int sy= mb_y*16 + 8;
                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    /* paint both chroma planes with a gray level scaled by QP */
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a chroma hue at the given angle (degrees) and saturation */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));

                    u=v=128;  /* default: neutral gray */
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
                        /* direct-skip: left neutral */
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
                        /* skip: left neutral */
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
                    }

                    /* segmentation markers: XOR luma so partition borders show */
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        /* subdivide further where the 8x8 sub-block MVs differ */
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);

                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        /* no visualization for interlaced H.264 MBs */
                    }
                }
                s->mbskip_table[mb_index]=0;
            }
        }
    }
}
01575
/**
 * Half-pel luma motion compensation for lowres decoding: computes the
 * sub-pel phase and source position at the reduced resolution, falls
 * back to the edge-emulation buffer when the block reads outside the
 * picture, and applies the chroma-style interpolation op.
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    /* sub-pel phase mask: one extra fractional bit per lowres level */
    const int s_mask= (2<<lowres)-1;
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        motion_x/=2;
        motion_y/=2;
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    /* unsigned compare also catches negative positions */
    if( (unsigned)src_x > h_edge_pos - (!!sx) - w
        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                                src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    /* rescale the phase to the 1/8-pel range expected by pix_op */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    if(field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
01618
01619
/**
 * Full luma+chroma motion compensation for lowres decoding, covering
 * frame and field based prediction. Chroma source positions depend on
 * the stream's subsampling (H.263, H.261, 4:2:0 / 4:2:2 / 4:4:4 MPEG).
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        /* adjust for the field offset at the reduced resolution */
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
    src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        /* H.263 chroma: keep the luma half-pel bit in the phase */
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){
        /* H.261: chroma MV is the luma MV divided by 4 */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y = mb_y*block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0: chroma subsampled in both directions */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvsx = mx & s_mask;
            uvsy = my & s_mask;
            uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
        } else {
            if(s->chroma_x_shift){
                /* 4:2:2: horizontal subsampling only */
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                /* 4:4:4: chroma matches luma */
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* fall back to the edge-emulation buffer when reading outside the frame */
    if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                                src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
            s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf+16;
        }
    }

    /* write to the bottom field of the destination */
    if(bottom_field){
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    /* read from the selected field of the reference */
    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* rescale the phases to the 1/8-pel range expected by pix_op */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        if(h >> s->chroma_y_shift){
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        }
    }

}
01735
/**
 * Chroma motion compensation for 4MV (four luma MVs per MB) macroblocks
 * in lowres decoding: the averaged luma MV (mx, my) is rounded to the
 * H.263 chroma grid and one block is interpolated for each chroma plane.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op,
                                     int mx, int my){
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){
        mx/=2;
        my/=2;
    }

    /* H.263-style rounding of the averaged MV to the chroma grid */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    /* rescale the phase to the 1/8-pel range expected by pix_op */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    /* Cr reads from the same position, so reuse the emulation decision */
    if(emu){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
01786
/**
 * Motion compensation of a macroblock at reduced (lowres) resolution,
 * dispatching on the MV type (16x16, 8x8, field, 16x8, dual-prime).
 * @param dest_y      luma destination pointer
 * @param dest_cb     chroma cb destination pointer
 * @param dest_cr     chroma cr destination pointer
 * @param dir         direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op      halfpel motion compensation function (average or put)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* one luma MC per 8x8 block; chroma uses the summed MV */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                               ref_picture[0], 0, 0,
                               (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field, then bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            /* a P field may reference the opposite field of the picture
             * being decoded, in which case switch to the current picture */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            /* same opposite-field consideration as above, per half-MB */
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                /* second pass averages into the first */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                /* after put we make avg of the same block */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
01912
01916 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
01917 {
01918 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
01919 int my, off, i, mvs;
01920
01921 if (s->picture_structure != PICT_FRAME) goto unhandled;
01922
01923 switch (s->mv_type) {
01924 case MV_TYPE_16X16:
01925 mvs = 1;
01926 break;
01927 case MV_TYPE_16X8:
01928 mvs = 2;
01929 break;
01930 case MV_TYPE_8X8:
01931 mvs = 4;
01932 break;
01933 default:
01934 goto unhandled;
01935 }
01936
01937 for (i = 0; i < mvs; i++) {
01938 my = s->mv[dir][i][1]<<qpel_shift;
01939 my_max = FFMAX(my_max, my);
01940 my_min = FFMIN(my_min, my);
01941 }
01942
01943 off = (FFMAX(-my_min, my_max) + 63) >> 6;
01944
01945 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
01946 unhandled:
01947 return s->mb_height-1;
01948 }
01949
01950
/* Dequantize an intra block and write its IDCT result to dest. */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
01957
01958
01959 static inline void add_dct(MpegEncContext *s,
01960 DCTELEM *block, int i, uint8_t *dest, int line_size)
01961 {
01962 if (s->block_last_index[i] >= 0) {
01963 s->dsp.idct_add (dest, line_size, block);
01964 }
01965 }
01966
01967 static inline void add_dequant_dct(MpegEncContext *s,
01968 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01969 {
01970 if (s->block_last_index[i] >= 0) {
01971 s->dct_unquantize_inter(s, block, i, qscale);
01972
01973 s->dsp.idct_add (dest, line_size, block);
01974 }
01975 }
01976
01980 void ff_clean_intra_table_entries(MpegEncContext *s)
01981 {
01982 int wrap = s->b8_stride;
01983 int xy = s->block_index[0];
01984
01985 s->dc_val[0][xy ] =
01986 s->dc_val[0][xy + 1 ] =
01987 s->dc_val[0][xy + wrap] =
01988 s->dc_val[0][xy + 1 + wrap] = 1024;
01989
01990 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
01991 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01992 if (s->msmpeg4_version>=3) {
01993 s->coded_block[xy ] =
01994 s->coded_block[xy + 1 ] =
01995 s->coded_block[xy + wrap] =
01996 s->coded_block[xy + 1 + wrap] = 0;
01997 }
01998
01999 wrap = s->mb_stride;
02000 xy = s->mb_x + s->mb_y * wrap;
02001 s->dc_val[1][xy] =
02002 s->dc_val[2][xy] = 1024;
02003
02004 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
02005 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
02006
02007 s->mbintra_table[xy]= 0;
02008 }
02009
02010
02011
02012
02013
02014
02015
02016
02017
02018
02019
/**
 * Reconstruct one macroblock into the current picture: perform motion
 * compensation for inter MBs, then add (inter) or put (intra) the IDCT
 * of the residual blocks.  Used by both the decoder and the encoder's
 * local reconstruction.
 *
 * @param s           MpegVideo context; s->mb_x / s->mb_y select the MB
 * @param block       up to 12 blocks of quantized DCT coefficients
 * @param lowres_flag nonzero when decoding at reduced resolution
 * @param is_mpeg12   compile-time specialization flag for the MPEG-1/2
 *                    path, letting dead branches fold away when inlined
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    /* XvMC hardware acceleration performs its own reconstruction */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* debug aid: log all coefficients of this MB and store them
         * (in permuted order) into the picture's dct_coeff buffer */
        int i,j;
        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                *dct++ = block[i][s->dsp.idct_permutation[j]];
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.qscale_table[mb_xy]= s->qscale;

    /* update the intra prediction state: inter MBs must invalidate any
     * intra predictors left behind (H.263-style prediction), other
     * codecs just reset the last-DC values */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct pixels unless this is an encoder pass that never needs
     * them (non-RD B/intra-only encoding without PSNR measurement) */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize= s->current_picture.linesize[0];
        const int uvlinesize= s->current_picture.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* skip-tracking: if the MB was skipped in this and enough previous
         * frames (mbskip counter >= picture age), the pixels are already
         * correct and nothing needs to be done */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age= s->current_picture.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=AV_PICTURE_TYPE_I);

                (*mbskip_ptr) ++;                 /* skipped again this frame */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;  /* saturate the counter */

                /* skipped for at least `age` frames in a reference picture:
                 * destination already holds the right pixels */
                if (*mbskip_ptr >= age && s->current_picture.reference){
                    return;
                }
            } else if(!s->current_picture.reference){
                /* non-reference picture: keep the counter growing so the
                 * age comparison above stays meaningful */
                (*mbskip_ptr) ++;
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0;  /* not skipped: reset */
            }
        }

        /* interlaced DCT interleaves the two fields: lines alternate */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y= s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* nobody will read this B-frame back: render into scratch */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion compensation */
            if(!s->encoding){

                /* frame threading: block until the rows this MB references
                 * have been decoded in the reference frames */
                if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
                    }
                }

                if(lowres_flag){
                    /* local op_pix shadows the outer declaration on purpose:
                     * lowres uses the h264 chroma MC functions */
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
                        /* second direction must average into the first */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
                        /* bidirectional: backward direction averages in */
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / IDCT if we are really late */
#if FF_API_HURRY_UP
            if(s->hurry_up>1) goto skip_idct;
#endif
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add the DCT residue; codecs whose dequantization is done
             * during coefficient decoding only need the plain add */
            if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        /* 4:2:0 - one chroma block per component */
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 - two chroma blocks stacked vertically */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){ /* 4:2:0 */
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        /* 4:2:2 / 4:4:4 - recompute strides for chroma */
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){ /* 4:4:4 - four chroma blocks */
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                /* WMV2 has its own per-MB add routine */
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* intra MB: the IDCT output replaces the destination pixels.
             * MPEG-1/2 coefficients arrive already dequantized. */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;

                        s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){ /* 4:4:4 */
                            s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }
            }
        }
skip_idct:
        /* rendered into scratch: copy the finished MB to the real picture */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
02273
02274 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02275 #if !CONFIG_SMALL
02276 if(s->out_format == FMT_MPEG1) {
02277 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02278 else MPV_decode_mb_internal(s, block, 0, 1);
02279 } else
02280 #endif
02281 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02282 else MPV_decode_mb_internal(s, block, 0, 0);
02283 }
02284
/**
 * Draw a horizontal band of decoded pixels if the user requested it, and
 * extend the picture edges of the band for later motion compensation.
 *
 * @param s MpegVideo context
 * @param y first line of the band (field lines for field pictures)
 * @param h height of the band (field lines for field pictures)
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    /* field pictures occupy every second frame line: convert to frame units */
    if(field_pic){
        h <<= 1;
        y <<= 1;
    }

    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        /* replicate the border pixels of the finished band outward, so
         * unrestricted MVs may reference pixels outside the picture */
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        /* clip the band to the coded picture height */
        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize,
                          s->linesize, s->h_edge_pos, edge_h,
                          EDGE_WIDTH, EDGE_WIDTH, sides);
        s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
        s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
    }

    /* clip the band to the display height */
    h= FFMIN(h, s->avctx->height - y);

    /* don't emit half-decoded field pairs unless the user allows it */
    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[4];

        /* pick the picture the user should see: the one being decoded for
         * coded-order/low-delay output, otherwise the previous picture */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            offset[0]=
            offset[1]=
            offset[2]=
            offset[3]= 0;
        }else{
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            offset[3]= 0;
        }

        /* reset FPU/MMX state before calling back into user code */
        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
02354
/**
 * Initialize s->block_index[] (positions of the six blocks of the current
 * macroblock in the per-8x8-block side arrays) and s->dest[] (destination
 * pixel pointers) from s->mb_x / s->mb_y.
 */
void ff_init_block_index(MpegEncContext *s){
    /* use the picture's own linesizes, not s->linesize (would be wrong
     * for field pictures) */
    const int linesize= s->current_picture.linesize[0];
    const int uvlinesize= s->current_picture.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;   /* log2 of the MB size in pixels */

    /* luma: two 8x8 block rows per MB.  The -2/-1 offsets place the
     * indices one MB to the left; presumably the caller advances them
     * per MB — verify against the MB loop. */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma: one block per MB, stored after the luma area of the arrays */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;

    /* destination pointers, likewise one MB to the left of (mb_x, mb_y) */
    s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
    s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_y_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y * linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: halve the row, field offset is already in data[] */
            s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
02386
02387 void ff_mpeg_flush(AVCodecContext *avctx){
02388 int i;
02389 MpegEncContext *s = avctx->priv_data;
02390
02391 if(s==NULL || s->picture==NULL)
02392 return;
02393
02394 for(i=0; i<s->picture_count; i++){
02395 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02396 || s->picture[i].type == FF_BUFFER_TYPE_USER))
02397 free_frame_buffer(s, &s->picture[i]);
02398 }
02399 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02400
02401 s->mb_x= s->mb_y= 0;
02402 s->closed_gop= 0;
02403
02404 s->parse_context.state= -1;
02405 s->parse_context.frame_start_found= 0;
02406 s->parse_context.overread= 0;
02407 s->parse_context.overread_index= 0;
02408 s->parse_context.index= 0;
02409 s->parse_context.last_index= 0;
02410 s->bitstream_buffer_size=0;
02411 s->pp_time=0;
02412 }
02413
02414 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02415 DCTELEM *block, int n, int qscale)
02416 {
02417 int i, level, nCoeffs;
02418 const uint16_t *quant_matrix;
02419
02420 nCoeffs= s->block_last_index[n];
02421
02422 if (n < 4)
02423 block[0] = block[0] * s->y_dc_scale;
02424 else
02425 block[0] = block[0] * s->c_dc_scale;
02426
02427 quant_matrix = s->intra_matrix;
02428 for(i=1;i<=nCoeffs;i++) {
02429 int j= s->intra_scantable.permutated[i];
02430 level = block[j];
02431 if (level) {
02432 if (level < 0) {
02433 level = -level;
02434 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02435 level = (level - 1) | 1;
02436 level = -level;
02437 } else {
02438 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02439 level = (level - 1) | 1;
02440 }
02441 block[j] = level;
02442 }
02443 }
02444 }
02445
02446 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02447 DCTELEM *block, int n, int qscale)
02448 {
02449 int i, level, nCoeffs;
02450 const uint16_t *quant_matrix;
02451
02452 nCoeffs= s->block_last_index[n];
02453
02454 quant_matrix = s->inter_matrix;
02455 for(i=0; i<=nCoeffs; i++) {
02456 int j= s->intra_scantable.permutated[i];
02457 level = block[j];
02458 if (level) {
02459 if (level < 0) {
02460 level = -level;
02461 level = (((level << 1) + 1) * qscale *
02462 ((int) (quant_matrix[j]))) >> 4;
02463 level = (level - 1) | 1;
02464 level = -level;
02465 } else {
02466 level = (((level << 1) + 1) * qscale *
02467 ((int) (quant_matrix[j]))) >> 4;
02468 level = (level - 1) | 1;
02469 }
02470 block[j] = level;
02471 }
02472 }
02473 }
02474
02475 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02476 DCTELEM *block, int n, int qscale)
02477 {
02478 int i, level, nCoeffs;
02479 const uint16_t *quant_matrix;
02480
02481 if(s->alternate_scan) nCoeffs= 63;
02482 else nCoeffs= s->block_last_index[n];
02483
02484 if (n < 4)
02485 block[0] = block[0] * s->y_dc_scale;
02486 else
02487 block[0] = block[0] * s->c_dc_scale;
02488 quant_matrix = s->intra_matrix;
02489 for(i=1;i<=nCoeffs;i++) {
02490 int j= s->intra_scantable.permutated[i];
02491 level = block[j];
02492 if (level) {
02493 if (level < 0) {
02494 level = -level;
02495 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02496 level = -level;
02497 } else {
02498 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02499 }
02500 block[j] = level;
02501 }
02502 }
02503 }
02504
02505 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02506 DCTELEM *block, int n, int qscale)
02507 {
02508 int i, level, nCoeffs;
02509 const uint16_t *quant_matrix;
02510 int sum=-1;
02511
02512 if(s->alternate_scan) nCoeffs= 63;
02513 else nCoeffs= s->block_last_index[n];
02514
02515 if (n < 4)
02516 block[0] = block[0] * s->y_dc_scale;
02517 else
02518 block[0] = block[0] * s->c_dc_scale;
02519 quant_matrix = s->intra_matrix;
02520 for(i=1;i<=nCoeffs;i++) {
02521 int j= s->intra_scantable.permutated[i];
02522 level = block[j];
02523 if (level) {
02524 if (level < 0) {
02525 level = -level;
02526 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02527 level = -level;
02528 } else {
02529 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02530 }
02531 block[j] = level;
02532 sum+=level;
02533 }
02534 }
02535 block[63]^=sum&1;
02536 }
02537
02538 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02539 DCTELEM *block, int n, int qscale)
02540 {
02541 int i, level, nCoeffs;
02542 const uint16_t *quant_matrix;
02543 int sum=-1;
02544
02545 if(s->alternate_scan) nCoeffs= 63;
02546 else nCoeffs= s->block_last_index[n];
02547
02548 quant_matrix = s->inter_matrix;
02549 for(i=0; i<=nCoeffs; i++) {
02550 int j= s->intra_scantable.permutated[i];
02551 level = block[j];
02552 if (level) {
02553 if (level < 0) {
02554 level = -level;
02555 level = (((level << 1) + 1) * qscale *
02556 ((int) (quant_matrix[j]))) >> 4;
02557 level = -level;
02558 } else {
02559 level = (((level << 1) + 1) * qscale *
02560 ((int) (quant_matrix[j]))) >> 4;
02561 }
02562 block[j] = level;
02563 sum+=level;
02564 }
02565 }
02566 block[63]^=sum&1;
02567 }
02568
02569 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02570 DCTELEM *block, int n, int qscale)
02571 {
02572 int i, level, qmul, qadd;
02573 int nCoeffs;
02574
02575 assert(s->block_last_index[n]>=0);
02576
02577 qmul = qscale << 1;
02578
02579 if (!s->h263_aic) {
02580 if (n < 4)
02581 block[0] = block[0] * s->y_dc_scale;
02582 else
02583 block[0] = block[0] * s->c_dc_scale;
02584 qadd = (qscale - 1) | 1;
02585 }else{
02586 qadd = 0;
02587 }
02588 if(s->ac_pred)
02589 nCoeffs=63;
02590 else
02591 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02592
02593 for(i=1; i<=nCoeffs; i++) {
02594 level = block[i];
02595 if (level) {
02596 if (level < 0) {
02597 level = level * qmul - qadd;
02598 } else {
02599 level = level * qmul + qadd;
02600 }
02601 block[i] = level;
02602 }
02603 }
02604 }
02605
02606 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02607 DCTELEM *block, int n, int qscale)
02608 {
02609 int i, level, qmul, qadd;
02610 int nCoeffs;
02611
02612 assert(s->block_last_index[n]>=0);
02613
02614 qadd = (qscale - 1) | 1;
02615 qmul = qscale << 1;
02616
02617 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02618
02619 for(i=0; i<=nCoeffs; i++) {
02620 level = block[i];
02621 if (level) {
02622 if (level < 0) {
02623 level = level * qmul - qadd;
02624 } else {
02625 level = level * qmul + qadd;
02626 }
02627 block[i] = level;
02628 }
02629 }
02630 }
02631
02635 void ff_set_qscale(MpegEncContext * s, int qscale)
02636 {
02637 if (qscale < 1)
02638 qscale = 1;
02639 else if (qscale > 31)
02640 qscale = 31;
02641
02642 s->qscale = qscale;
02643 s->chroma_qscale= s->chroma_qscale_table[qscale];
02644
02645 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02646 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02647 }
02648
02649 void MPV_report_decode_progress(MpegEncContext *s)
02650 {
02651 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
02652 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02653 }