#define RC_VARIANCE 1 // use variance or ssd for fast rc

#include "libavutil/opt.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "mpegvideo_common.h"
#include "dnxhdenc.h"

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define DNX10BIT_QMAT_SHIFT 18 // The largest value that will not lead to overflow for 10bit samples.

static const AVOption options[] = {
    {"nitris_compat", "encode with Avid Nitris compatibility", offsetof(DNXHDEncContext, nitris_compat), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, VE},
    {NULL}
};

static const AVClass class = { "dnxhd", av_default_item_name, options, LIBAVUTIL_VERSION_INT };

#define LAMBDA_FRAC_BITS 10

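/* Read an 8x4 strip of 8-bit pixels and mirror it vertically so the bottom
 * four rows of the DCT block repeat the top four in reverse order. Used by
 * dnxhd_get_blocks() for the bottom blocks of the last macroblock row of
 * 1080-line interlaced content, where the source has no further rows. */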
static void dnxhd_8bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        block[0] = pixels[0]; block[1] = pixels[1];
        block[2] = pixels[2]; block[3] = pixels[3];
        block[4] = pixels[4]; block[5] = pixels[5];
        block[6] = pixels[6]; block[7] = pixels[7];
        pixels += line_size;
        block  += 8;
    }
    memcpy(block,      block -  8, sizeof(*block) * 8);
    memcpy(block +  8, block - 16, sizeof(*block) * 8);
    memcpy(block + 16, block - 24, sizeof(*block) * 8);
    memcpy(block + 24, block - 32, sizeof(*block) * 8);
}

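/* Same symmetric 8x4 read for 10-bit content: the source samples are already
 * 16-bit wide, so each row can be memcpy'd straight into the DCTELEM block,
 * once forward and once mirrored around the block centre. */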
static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;

    block += 32;

    for (i = 0; i < 4; i++) {
        memcpy(block + i     * 8, pixels + i * line_size, 8 * sizeof(*block));
        memcpy(block - (i+1) * 8, pixels + i * line_size, 8 * sizeof(*block));
    }
}

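/* Forward DCT + quantization for the 10-bit path (replaces the generic
 * dct_quantize used for 8-bit content): transform the block, scale the DC
 * coefficient down by 4 with rounding, then quantize every AC coefficient
 * with the precomputed reciprocal tables from dnxhd_init_qmat(), i.e. a
 * multiply and a DNX10BIT_QMAT_SHIFT shift instead of a division. Returns
 * the index of the last non-zero coefficient in scan order. */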
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, DCTELEM *block,
                                    int n, int qscale, int *overflow)
{
    const uint8_t *scantable= ctx->intra_scantable.scantable;
    const int *qmat = n<4 ? ctx->q_intra_matrix[qscale] : ctx->q_chroma_intra_matrix[qscale];
    int last_non_zero = 0;
    int i;

    ctx->dsp.fdct(block);

    block[0] = (block[0] + 2) >> 2;

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];
        int sign = block[j] >> 31;
        int level = (block[j] ^ sign) - sign;
        level = level * qmat[j] >> DNX10BIT_QMAT_SHIFT;
        block[j] = (level ^ sign) - sign;
        if (level)
            last_non_zero = i;
    }

    return last_non_zero;
}

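/* Build the run/level VLC lookup tables from the CID table: for each
 * (level, run-flag) pair, find the matching AC code, append a sign bit for
 * non-zero levels, and add an index escape for levels above 64. vlc_codes
 * and vlc_bits are biased by max_level*2 so they can be indexed directly
 * with signed levels. */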
static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i, j, level, run;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_codes, max_level*4*sizeof(*ctx->vlc_codes), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_bits,  max_level*4*sizeof(*ctx->vlc_bits),  fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, 63*2,                                fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits,  63,                                  fail);

    ctx->vlc_codes += max_level*2;
    ctx->vlc_bits  += max_level*2;
    for (level = -max_level; level < max_level; level++) {
        for (run = 0; run < 2; run++) {
            int index = (level<<1)|run;
            int sign, offset = 0, alevel = level;

            MASK_ABS(sign, alevel);
            if (alevel > 64) {
                offset = (alevel-1)>>6;
                alevel -= offset<<6;
            }
            for (j = 0; j < 257; j++) {
                if (ctx->cid_table->ac_level[j] >> 1 == alevel &&
                    (!offset || (ctx->cid_table->ac_flags[j] & 1) && offset) &&
                    (!run    || (ctx->cid_table->ac_flags[j] & 2) && run)) {
                    assert(!ctx->vlc_codes[index]);
                    if (alevel) {
                        ctx->vlc_codes[index] = (ctx->cid_table->ac_codes[j]<<1)|(sign&1);
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits[j]+1;
                    } else {
                        ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits [j];
                    }
                    break;
                }
            }
            assert(!alevel || j < 257);
            if (offset) {
                ctx->vlc_codes[index] = (ctx->vlc_codes[index]<<ctx->cid_table->index_bits)|offset;
                ctx->vlc_bits [index]+= ctx->cid_table->index_bits;
            }
        }
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
fail:
    return -1;
}

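/* Precompute quantization matrices for every qscale up to qmax. The 8-bit
 * path reuses ff_convert_matrix() on the CID weight tables and then shifts
 * all entries left by 2. The 10-bit path stores fixed-point reciprocals of
 * qscale * weight, consumed by dnxhd_10bit_dct_quantize(). */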
static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    uint16_t weight_matrix[64] = {1,};
    int qscale, i;
    const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
    const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int),      fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int),      fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t), fail);

    if (ctx->cid_table->bit_depth == 8) {
        for (i = 1; i < 64; i++) {
            int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
            weight_matrix[j] = ctx->cid_table->luma_weight[i];
        }
        ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix,
                          ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);
        for (i = 1; i < 64; i++) {
            int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
            weight_matrix[j] = ctx->cid_table->chroma_weight[i];
        }
        ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix,
                          ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);

        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 0; i < 64; i++) {
                ctx->qmatrix_l  [qscale]   [i] <<= 2; ctx->qmatrix_c  [qscale]   [i] <<= 2;
                ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2;
                ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2;
            }
        }
    } else {
        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 1; i < 64; i++) {
                int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];

                ctx->qmatrix_l[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * luma_weight_table[i]);
                ctx->qmatrix_c[qscale][j] = (1 << (DNX10BIT_QMAT_SHIFT + 1)) / (qscale * chroma_weight_table[i]);
            }
        }
    }

    ctx->m.q_chroma_intra_matrix16 = ctx->qmatrix_c16;
    ctx->m.q_chroma_intra_matrix   = ctx->qmatrix_c;
    ctx->m.q_intra_matrix16        = ctx->qmatrix_l16;
    ctx->m.q_intra_matrix          = ctx->qmatrix_l;

    return 0;
fail:
    return -1;
}

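/* Allocate the rate-control tables. mb_rc holds per-qscale, per-macroblock
 * bit/SSD estimates (8160 macroblocks covers 1920x1088, i.e. 120x68);
 * mb_cmp is only needed by the fast, non-RD path. frame_bits is the payload
 * budget of one coding unit: the total size minus the 640-byte header, the
 * 4-byte trailer and any requested padding. */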
static int dnxhd_init_rc(DNXHDEncContext *ctx)
{
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
    if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
        FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);

    ctx->frame_bits = (ctx->cid_table->coding_unit_size - 640 - 4 - ctx->min_padding) * 8;
    ctx->qscale = 1;
    ctx->lambda = 2<<LAMBDA_FRAC_BITS;
    return 0;
fail:
    return -1;
}

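/* Encoder init: pick the compression ID (CID) from the pixel format and
 * frame parameters, set up DSP/DCT routines, quantization matrices, VLC
 * tables and rate control, and clone the context for each slice thread.
 * The min_padding set for nitris_compat reserves extra space in each coding
 * unit for compatibility with Avid Nitris hardware (see the option above). */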
static int dnxhd_encode_init(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int i, index, bit_depth;

    switch (avctx->pix_fmt) {
    case PIX_FMT_YUV422P:
        bit_depth = 8;
        break;
    case PIX_FMT_YUV422P10:
        bit_depth = 10;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "pixel format is incompatible with DNxHD\n");
        return -1;
    }

    ctx->cid = ff_dnxhd_find_cid(avctx, bit_depth);
    if (!ctx->cid) {
        av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n");
        return -1;
    }
    av_log(avctx, AV_LOG_DEBUG, "cid %d\n", ctx->cid);

    index = ff_dnxhd_get_cid_table(ctx->cid);
    ctx->cid_table = &ff_dnxhd_cid_table[index];

    ctx->m.avctx    = avctx;
    ctx->m.mb_intra = 1;
    ctx->m.h263_aic = 1;

    avctx->bits_per_raw_sample = ctx->cid_table->bit_depth;

    dsputil_init(&ctx->m.dsp, avctx);
    ff_dct_common_init(&ctx->m);
    if (!ctx->m.dct_quantize)
        ctx->m.dct_quantize = dct_quantize_c;

    if (ctx->cid_table->bit_depth == 10) {
        ctx->m.dct_quantize     = dnxhd_10bit_dct_quantize;
        ctx->get_pixels_8x4_sym = dnxhd_10bit_get_pixels_8x4_sym;
        ctx->block_width_l2     = 4;
    } else {
        ctx->get_pixels_8x4_sym = dnxhd_8bit_get_pixels_8x4_sym;
        ctx->block_width_l2     = 3;
    }

#if HAVE_MMX
    ff_dnxhd_init_mmx(ctx);
#endif

    ctx->m.mb_height = (avctx->height + 15) / 16;
    ctx->m.mb_width  = (avctx->width  + 15) / 16;

    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
        ctx->interlaced = 1;
        ctx->m.mb_height /= 2;
    }

    ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;

    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        ctx->m.intra_quant_bias = avctx->intra_quant_bias;
    if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0)
        return -1;

    if (ctx->nitris_compat)
        ctx->min_padding = 1600;

    if (dnxhd_init_vlc(ctx) < 0)
        return -1;
    if (dnxhd_init_rc(ctx) < 0)
        return -1;

    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->slice_offs, ctx->m.mb_height*sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits,    ctx->m.mb_num   *sizeof(uint16_t), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale,  ctx->m.mb_num   *sizeof(uint8_t),  fail);

    ctx->frame.key_frame = 1;
    ctx->frame.pict_type = AV_PICTURE_TYPE_I;
    ctx->m.avctx->coded_frame = &ctx->frame;

    if (avctx->thread_count > MAX_THREADS) {
        av_log(avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    ctx->thread[0] = ctx;
    for (i = 1; i < avctx->thread_count; i++) {
        ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
        memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
    }

    return 0;
fail:
    return -1;
}

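/* Write the fixed 640-byte DNxHD frame header: bitstream prefix, field flag,
 * picture dimensions, bit depth, CID, and the location of the macroblock
 * scan index table (msip), which dnxhd_encode_picture() later fills with the
 * per-row slice offsets. */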
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };

    memset(buf, 0, 640);

    memcpy(buf, header_prefix, 5);
    buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01;
    buf[6] = 0x80;
    buf[7] = 0xa0;
    AV_WB16(buf + 0x18, avctx->height>>ctx->interlaced);
    AV_WB16(buf + 0x1a, avctx->width);
    AV_WB16(buf + 0x1d, avctx->height>>ctx->interlaced);

    buf[0x21] = ctx->cid_table->bit_depth == 10 ? 0x58 : 0x38;
    buf[0x22] = 0x88 + (ctx->interlaced<<2);
    AV_WB32(buf + 0x28, ctx->cid);
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;

    buf[0x5f] = 0x01;

    buf[0x167] = 0x02;
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4);
    buf[0x16d] = ctx->m.mb_height;
    buf[0x16f] = 0x10;

    ctx->msip = buf + 0x170;
    return 0;
}

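/* Write one DC difference: the size category nbits is coded with the CID DC
 * prefix, followed by nbits bits of the difference (negative values are
 * decremented first, giving one's-complement style coding). */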
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2*diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2*diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits]<<nbits) + (diff & ((1 << nbits) - 1)));
}

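/* Entropy-code one block: the DC coefficient is coded as a difference
 * against the per-component predictor, each non-zero AC level is written
 * with its run/level VLC, and the block is terminated with the EOB code
 * (vlc_codes[0]). */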
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, DCTELEM *block, int last_index, int n)
{
    int last_non_zero = 0;
    int slevel, i, j;

    dnxhd_encode_dc(ctx, block[0] - ctx->m.last_dc[n]);
    ctx->m.last_dc[n] = block[0];

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        slevel = block[j];
        if (slevel) {
            int run_level = i - last_non_zero - 1;
            int rlevel = (slevel<<1)|!!run_level;
            put_bits(&ctx->m.pb, ctx->vlc_bits[rlevel], ctx->vlc_codes[rlevel]);
            if (run_level)
                put_bits(&ctx->m.pb, ctx->run_bits[run_level], ctx->run_codes[run_level]);
            last_non_zero = i;
        }
    }
    put_bits(&ctx->m.pb, ctx->vlc_bits[0], ctx->vlc_codes[0]);
}

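/* Inverse quantization used only for rate-distortion estimation: rebuild the
 * coefficients from their quantized values so that the IDCT + SSD in
 * dnxhd_calc_bits_thread() measures actual distortion. Weights equal to 8
 * (10-bit) or 32 (8-bit) skip the rounding offset. */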
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *block, int n, int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    weight_matrix = (n&2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = (1-2*level) * qscale * weight_matrix[i];
                if (ctx->cid_table->bit_depth == 10) {
                    if (weight_matrix[i] != 8)
                        level += 8;
                    level >>= 4;
                } else {
                    if (weight_matrix[i] != 32)
                        level += 32;
                    level >>= 6;
                }
                level = -level;
            } else {
                level = (2*level+1) * qscale * weight_matrix[i];
                if (ctx->cid_table->bit_depth == 10) {
                    if (weight_matrix[i] != 8)
                        level += 8;
                    level >>= 4;
                } else {
                    if (weight_matrix[i] != 32)
                        level += 32;
                    level >>= 6;
                }
            }
            block[j] = level;
        }
    }
}

static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block)
{
    int score = 0;
    int i;
    for (i = 0; i < 64; i++)
        score += (block[i] - qblock[i]) * (block[i] - qblock[i]);
    return score;
}

static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *block, int last_index)
{
    int last_non_zero = 0;
    int bits = 0;
    int i, j, level;
    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            int run_level = i - last_non_zero - 1;
            bits += ctx->vlc_bits[(level<<1)|!!run_level]+ctx->run_bits[run_level];
            last_non_zero = i;
        }
    }
    return bits;
}

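/* Gather the eight DCT blocks of one 4:2:2 macroblock: four luma and four
 * chroma blocks. For the last macroblock row of 1080-line material, the
 * bottom blocks either use the symmetric 8x4 read (interlaced) or are
 * cleared (progressive), since the source has no rows below line 1080. */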
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const int bs = ctx->block_width_l2;
    const int bw = 1 << bs;
    const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize)   + (mb_x << bs+1);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs);
    DSPContext *dsp = &ctx->m.dsp;

    dsp->get_pixels(ctx->blocks[0], ptr_y,      ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[1], ptr_y + bw, ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[2], ptr_u,      ctx->m.uvlinesize);
    dsp->get_pixels(ctx->blocks[3], ptr_v,      ctx->m.uvlinesize);

    if (mb_y+1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        if (ctx->interlaced) {
            ctx->get_pixels_8x4_sym(ctx->blocks[4], ptr_y + ctx->dct_y_offset,      ctx->m.linesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[6], ptr_u + ctx->dct_uv_offset,     ctx->m.uvlinesize);
            ctx->get_pixels_8x4_sym(ctx->blocks[7], ptr_v + ctx->dct_uv_offset,     ctx->m.uvlinesize);
        } else {
            dsp->clear_block(ctx->blocks[4]);
            dsp->clear_block(ctx->blocks[5]);
            dsp->clear_block(ctx->blocks[6]);
            dsp->clear_block(ctx->blocks[7]);
        }
    } else {
        dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset,      ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + bw, ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset,     ctx->m.uvlinesize);
        dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset,     ctx->m.uvlinesize);
    }
}

static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
{
    static const uint8_t component[8]={0,0,1,2,0,0,1,2};
    return component[i];
}

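/* Per-slice worker for rate control: for every macroblock of row `jobnr`,
 * quantize all eight blocks at the current ctx->qscale, count DC and AC
 * bits, and (in RD mode) also dequantize + IDCT to accumulate the SSD.
 * Results are stored in ctx->mb_rc[qscale][mb]. */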
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    int qscale = ctx->qscale;
    LOCAL_ALIGNED_16(DCTELEM, block, [64]);
    ctx = ctx->thread[threadnr];

    ctx->m.last_dc[0] =
    ctx->m.last_dc[1] =
    ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2);

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int ssd     = 0;
        int ac_bits = 0;
        int dc_bits = 0;
        int i;

        dnxhd_get_blocks(ctx, mb_x, mb_y);

        for (i = 0; i < 8; i++) {
            DCTELEM *src_block = ctx->blocks[i];
            int overflow, nbits, diff, last_index;
            int n = dnxhd_switch_matrix(ctx, i);

            memcpy(block, src_block, 64*sizeof(*block));
            last_index = ctx->m.dct_quantize(&ctx->m, block, 4&(2*i), qscale, &overflow);
            ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);

            diff = block[0] - ctx->m.last_dc[n];
            if (diff < 0) nbits = av_log2_16bit(-2*diff);
            else          nbits = av_log2_16bit( 2*diff);

            assert(nbits < ctx->cid_table->bit_depth + 4);
            dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

            ctx->m.last_dc[n] = block[0];

            if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
                dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
                ctx->m.dsp.idct(block);
                ssd += dnxhd_ssd_block(block, src_block);
            }
        }
        ctx->mb_rc[qscale][mb].ssd  = ssd;
        ctx->mb_rc[qscale][mb].bits = ac_bits+dc_bits+12+8*ctx->vlc_bits[0];
    }
    return 0;
}

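/* Per-slice worker for the final bitstream pass: write the 12-bit qscale
 * field of each macroblock, re-quantize the blocks with the qscale chosen by
 * rate control, entropy-code them, and pad the slice to a 32-bit boundary. */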
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    init_put_bits(&ctx->m.pb, (uint8_t *)arg + 640 + ctx->slice_offs[jobnr], ctx->slice_size[jobnr]);

    ctx->m.last_dc[0] =
    ctx->m.last_dc[1] =
    ctx->m.last_dc[2] = 1 << (ctx->cid_table->bit_depth + 2);
    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int qscale = ctx->mb_qscale[mb];
        int i;

        put_bits(&ctx->m.pb, 12, qscale<<1);

        dnxhd_get_blocks(ctx, mb_x, mb_y);

        for (i = 0; i < 8; i++) {
            DCTELEM *block = ctx->blocks[i];
            int last_index, overflow;
            int n = dnxhd_switch_matrix(ctx, i);
            last_index = ctx->m.dct_quantize(&ctx->m, block, 4&(2*i), qscale, &overflow);
            dnxhd_encode_block(ctx, block, last_index, n);
        }
    }
    if (put_bits_count(&ctx->m.pb)&31)
        put_bits(&ctx->m.pb, 32-(put_bits_count(&ctx->m.pb)&31), 0);
    flush_put_bits(&ctx->m.pb);
    return 0;
}

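/* Compute the byte offset and size of each macroblock row (slice) from the
 * per-macroblock bit counts, rounding every slice up to a 32-bit boundary. */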
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
{
    int mb_y, mb_x;
    int offset = 0;
    for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {
        int thread_size;
        ctx->slice_offs[mb_y] = offset;
        ctx->slice_size[mb_y] = 0;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            ctx->slice_size[mb_y] += ctx->mb_bits[mb];
        }
        ctx->slice_size[mb_y]   = (ctx->slice_size[mb_y]+31)&~31;
        ctx->slice_size[mb_y] >>= 3;
        thread_size = ctx->slice_size[mb_y];
        offset += thread_size;
    }
}

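/* Per-slice worker for the fast rate-control heuristic: compute a spatial
 * activity measure for every macroblock of the row. The 8-bit path uses the
 * DSP pix_sum/pix_norm1 helpers; the 10-bit path computes mean and variance
 * directly on the samples shifted down by 6 bits. */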
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];
    if (ctx->cid_table->bit_depth == 8) {
        uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize);
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
            int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)sum*sum)>>8)+128)>>8;
            ctx->mb_cmp[mb].value = varc;
            ctx->mb_cmp[mb].mb = mb;
        }
    } else {
        int const linesize = ctx->m.linesize >> 1;
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
            uint16_t *pix = (uint16_t*)ctx->thread[0]->src[0] + ((mb_y << 4) * linesize) + (mb_x << 4);
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int sum = 0;
            int sqsum = 0;
            int mean, sqmean;
            int i, j;

            for (i = 0; i < 16; ++i) {
                for (j = 0; j < 16; ++j) {
                    int const sample = (unsigned)pix[j] >> 6;
                    sum   += sample;
                    sqsum += sample * sample;
                }
                pix += linesize;
            }
            mean   = sum >> 8;
            sqmean = sqsum >> 8;
            ctx->mb_cmp[mb].value = sqmean - mean * mean;
            ctx->mb_cmp[mb].mb = mb;
        }
    }
    return 0;
}

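/* Rate control for mb_decision == FF_MB_DECISION_RD: bit and SSD costs are
 * collected for every qscale, then a Lagrange multiplier lambda is searched
 * (bisection with widening steps) so that choosing, per macroblock, the
 * qscale minimizing bits*lambda + (ssd << LAMBDA_FRAC_BITS) keeps the frame
 * within frame_bits. Returns -1 if no lambda fits. */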
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y, q;

    for (q = 1; q < avctx->qmax; q++) {
        ctx->qscale = q;
        avctx->execute2(avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height);
    }
    up_step = down_step = 2<<LAMBDA_FRAC_BITS;
    lambda = ctx->lambda;

    for (;;) {
        int bits = 0;
        int end = 0;
        if (lambda == last_higher) {
            lambda++;
            end = 1;
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int qscale = 1;
                int mb = y*ctx->m.mb_width+x;
                for (q = 1; q < avctx->qmax; q++) {
                    unsigned score = ctx->mb_rc[q][mb].bits*lambda+
                                     ((unsigned)ctx->mb_rc[q][mb].ssd<<LAMBDA_FRAC_BITS);
                    if (score < min) {
                        min = score;
                        qscale = q;
                    }
                }
                bits += ctx->mb_rc[qscale][mb].bits;
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb] = ctx->mb_rc[qscale][mb].bits;
            }
            bits = (bits+31)&~31;
            if (bits > ctx->frame_bits)
                break;
        }

        if (end) {
            if (bits > ctx->frame_bits)
                return -1;
            break;
        }
        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda+last_higher)>>1;
            else
                lambda -= down_step;
            down_step = FFMIN((int64_t)down_step*5, INT_MAX);
            up_step = 1<<LAMBDA_FRAC_BITS;
            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda+last_lower)>>1;
            else if ((int64_t)lambda + up_step > INT_MAX)
                return -1;
            else
                lambda += up_step;
            up_step = FFMIN((int64_t)up_step*5, INT_MAX);
            down_step = 1<<LAMBDA_FRAC_BITS;
        }
    }

    ctx->lambda = lambda;
    return 0;
}

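/* Fast rate-control helper: binary-search a single frame-wide qscale whose
 * total estimated bit count fits in frame_bits. Returns 0 on success, 1 if
 * the frame already fits at qscale 1, -1 if qmax is reached without
 * fitting. */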
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
{
    int bits = 0;
    int up_step = 1;
    int down_step = 1;
    int last_higher = 0;
    int last_lower = INT_MAX;
    int qscale;
    int x, y;

    qscale = ctx->qscale;
    for (;;) {
        bits = 0;
        ctx->qscale = qscale;
        ctx->m.avctx->execute2(ctx->m.avctx, dnxhd_calc_bits_thread, NULL, NULL, ctx->m.mb_height);
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                bits += ctx->mb_rc[qscale][y*ctx->m.mb_width+x].bits;
            bits = (bits+31)&~31;
            if (bits > ctx->frame_bits)
                break;
        }

        if (bits < ctx->frame_bits) {
            if (qscale == 1)
                return 1;
            if (last_higher == qscale - 1) {
                qscale = last_higher;
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale+last_higher)>>1;
            else
                qscale -= down_step++;
            if (qscale < 1)
                qscale = 1;
            up_step = 1;
        } else {
            if (last_lower == qscale + 1)
                break;
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale+last_lower)>>1;
            else
                qscale += up_step++;
            down_step = 1;
            if (qscale >= ctx->m.avctx->qmax)
                return -1;
        }
    }

    ctx->qscale = qscale;
    return 0;
}

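/* Byte-wise LSD radix sort used to order macroblocks by their comparison
 * value, in descending order (get_bucket() inverts the bucket index). The
 * two upper passes only run when some value has non-zero bits above the low
 * 16, which is detected from the bucket prefix sums. */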
#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

static inline int get_bucket(int value, int shift)
{
    value >>= shift;
    value  &= NBUCKETS - 1;
    return NBUCKETS - 1 - value;
}

static void radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS])
{
    int i, j;
    memset(buckets, 0, sizeof(buckets[0][0]) * RADIX_PASSES * NBUCKETS);
    for (i = 0; i < size; i++) {
        int v = data[i].value;
        for (j = 0; j < RADIX_PASSES; j++) {
            buckets[j][get_bucket(v, 0)]++;
            v >>= BUCKET_BITS;
        }
        assert(!v);
    }
    for (j = 0; j < RADIX_PASSES; j++) {
        int offset = size;
        for (i = NBUCKETS - 1; i >= 0; i--)
            buckets[j][i] = offset -= buckets[j][i];
        assert(!buckets[j][0]);
    }
}

static void radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass)
{
    int shift = pass * BUCKET_BITS;
    int i;
    for (i = 0; i < size; i++) {
        int v   = get_bucket(data[i].value, shift);
        int pos = buckets[v]++;
        dst[pos] = data[i];
    }
}

static void radix_sort(RCCMPEntry *data, int size)
{
    int buckets[RADIX_PASSES][NBUCKETS];
    RCCMPEntry *tmp = av_malloc(sizeof(*tmp) * size);
    radix_count(data, size, buckets);
    radix_sort_pass(tmp, data, size, buckets[0], 0);
    radix_sort_pass(data, tmp, size, buckets[1], 1);
    if (buckets[2][NBUCKETS - 1] || buckets[3][NBUCKETS - 1]) {
        radix_sort_pass(tmp, data, size, buckets[2], 2);
        radix_sort_pass(data, tmp, size, buckets[3], 3);
    }
    av_free(tmp);
}

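/* Fast (non-RD) rate control: find a base qscale with dnxhd_find_qscale(),
 * assign it to every macroblock, then, if the worst-case padded size still
 * exceeds frame_bits, sort macroblocks by their comparison value (variance,
 * or SSD gain per bit when RC_VARIANCE is 0) and raise the qscale of the
 * first entries by one until the frame fits. */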
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int max_bits = 0;
    int ret, x, y;
    if ((ret = dnxhd_find_qscale(ctx)) < 0)
        return -1;
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y*ctx->m.mb_width+x;
            int delta_bits;
            ctx->mb_qscale[mb] = ctx->qscale;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale][mb].bits;
            max_bits += ctx->mb_rc[ctx->qscale][mb].bits;
            if (!RC_VARIANCE) {
                delta_bits = ctx->mb_rc[ctx->qscale][mb].bits-ctx->mb_rc[ctx->qscale+1][mb].bits;
                ctx->mb_cmp[mb].mb = mb;
                ctx->mb_cmp[mb].value = delta_bits ?
                    ((ctx->mb_rc[ctx->qscale][mb].ssd-ctx->mb_rc[ctx->qscale+1][mb].ssd)*100)/delta_bits
                    : INT_MIN;
            }
        }
        max_bits += 31;
    }
    if (!ret) {
        if (RC_VARIANCE)
            avctx->execute2(avctx, dnxhd_mb_var_thread, NULL, NULL, ctx->m.mb_height);
        radix_sort(ctx->mb_cmp, ctx->m.mb_num);
        for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            max_bits -= ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale+1][mb].bits;
            ctx->mb_qscale[mb] = ctx->qscale+1;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale+1][mb].bits;
        }
    }
    return 0;
}

static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
    int i;

    for (i = 0; i < 3; i++) {
        ctx->frame.data[i]     = frame->data[i];
        ctx->frame.linesize[i] = frame->linesize[i];
    }

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        ctx->thread[i]->m.linesize    = ctx->frame.linesize[0]<<ctx->interlaced;
        ctx->thread[i]->m.uvlinesize  = ctx->frame.linesize[1]<<ctx->interlaced;
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize  *8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
    }

    ctx->frame.interlaced_frame = frame->interlaced_frame;
    ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}

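/* Top-level encode entry point: for each field (twice when interlaced) write
 * the 640-byte header, run rate control (RD or fast), lay out the slices,
 * encode all macroblock rows in parallel with execute2(), zero the remaining
 * payload and append the 0x600DC0DE end marker. Returns the fixed frame_size
 * from the CID table. */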
static int dnxhd_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int first_field = 1;
    int offset, i, ret;

    if (buf_size < ctx->cid_table->frame_size) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small to compress picture\n");
        return -1;
    }

    dnxhd_load_picture(ctx, data);

encode_coding_unit:
    for (i = 0; i < 3; i++) {
        ctx->src[i] = ctx->frame.data[i];
        if (ctx->interlaced && ctx->cur_field)
            ctx->src[i] += ctx->frame.linesize[i];
    }

    dnxhd_write_header(avctx, buf);

    if (avctx->mb_decision == FF_MB_DECISION_RD)
        ret = dnxhd_encode_rdo(avctx, ctx);
    else
        ret = dnxhd_encode_fast(avctx, ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "picture could not fit ratecontrol constraints, increase qmax\n");
        return -1;
    }

    dnxhd_setup_threads_slices(ctx);

    offset = 0;
    for (i = 0; i < ctx->m.mb_height; i++) {
        AV_WB32(ctx->msip + i * 4, offset);
        offset += ctx->slice_size[i];
        assert(!(ctx->slice_size[i] & 3));
    }

    avctx->execute2(avctx, dnxhd_encode_thread, buf, NULL, ctx->m.mb_height);

    assert(640 + offset + 4 <= ctx->cid_table->coding_unit_size);
    memset(buf + 640 + offset, 0, ctx->cid_table->coding_unit_size - 4 - offset - 640);

    AV_WB32(buf + ctx->cid_table->coding_unit_size - 4, 0x600DC0DE);

    if (ctx->interlaced && first_field) {
        first_field     = 0;
        ctx->cur_field ^= 1;
        buf      += ctx->cid_table->coding_unit_size;
        buf_size -= ctx->cid_table->coding_unit_size;
        goto encode_coding_unit;
    }

    ctx->frame.quality = ctx->qscale*FF_QP2LAMBDA;

    return ctx->cid_table->frame_size;
}

static int dnxhd_encode_end(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);
    int i;

    av_free(ctx->vlc_codes-max_level*2);
    av_free(ctx->vlc_bits -max_level*2);
    av_freep(&ctx->run_codes);
    av_freep(&ctx->run_bits);

    av_freep(&ctx->mb_bits);
    av_freep(&ctx->mb_qscale);
    av_freep(&ctx->mb_rc);
    av_freep(&ctx->mb_cmp);
    av_freep(&ctx->slice_size);
    av_freep(&ctx->slice_offs);

    av_freep(&ctx->qmatrix_c);
    av_freep(&ctx->qmatrix_l);
    av_freep(&ctx->qmatrix_c16);
    av_freep(&ctx->qmatrix_l16);

    for (i = 1; i < avctx->thread_count; i++)
        av_freep(&ctx->thread[i]);

    return 0;
}

AVCodec ff_dnxhd_encoder = {
    .name           = "dnxhd",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_DNXHD,
    .priv_data_size = sizeof(DNXHDEncContext),
    .init           = dnxhd_encode_init,
    .encode         = dnxhd_encode_picture,
    .close          = dnxhd_encode_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_YUV422P10, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
    .priv_class     = &class,
};