                         /* from svq1_write_header(): frame-size lookup tail */
                         s->frame_width, s->frame_height);
#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
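/* For context, a minimal sketch of the scalar helper the line above belongs
 * to; the signature is taken from the ssd_int8_vs_int16_c() entry in the
 * cross-reference list below, and the body is a reconstruction rather than a
 * verbatim copy of the source. */
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
                               intptr_t size)
{
    int score = 0;

    /* sum of squared differences between the int8 codebook vector
     * and the int16 residual block */
    for (intptr_t i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}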
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int w = 2 << ((level + 2) >> 1);
    int h = 2 << ((level + 1) >> 1);
    int16_t (*block)[256] = s->encoded_block_levels[level];
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];
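    /* Block geometry per recursion level, worked out from the two shifts
     * above (w = 2 << ((level + 2) >> 1), h = 2 << ((level + 1) >> 1)):
     *   level 5: 16x16   level 4: 16x8   level 3: 8x8
     *   level 2:  8x4    level 1:  4x4   level 0: 4x2
     * so each recursion step halves the block along one axis. */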
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {

        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
    for (count = 1; count < 7; count++) {
        int best_vector_score = INT_MAX;
        int best_vector_sum   = -999, best_vector_mean = -999;
        const int stage       = count - 1;
        const int8_t *vector;
        for (i = 0; i < 16; i++) {
            int sum = codebook_sum[stage * 16 + i];
            int sqr, diff, score;

            sqr = s->ssd_int8_vs_int16(vector, block[stage], size);
            if (score < best_vector_score) {
                best_vector_score  = score;
                best_vector[stage] = i;
                best_vector_sum    = sum;
                best_vector_mean   = mean;
        for (j = 0; j < size; j++)
            block[stage + 1][j] = block[stage][j] - vector[j];

        best_vector_score += lambda *
                             multistage_vlc[1 + count][1]
                             + mean_vlc[best_vector_mean][1]);
        if (best_vector_score < best_score) {
            best_score = best_vector_score;
            best_mean  = best_vector_mean;
    if (best_score > threshold && level) {

            backup[i] = s->reorder_pb[i];

                             threshold >> 1, lambda, intra);

        if (score < best_score) {

            s->reorder_pb[i] = backup[i];
    av_assert1((best_mean >= 0 && best_mean < 256) || !intra);
    av_assert1(best_mean >= -256 && best_mean < 256);
    av_assert1(best_count >= 0 && best_count < 7);
             multistage_vlc[1 + best_count][1],
             multistage_vlc[1 + best_count][0]);

             mean_vlc[best_mean][0]);
    for (i = 0; i < best_count; i++) {
        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                                          block[best_count][x + w * y] +
static void init_block_index(MpegEncContext *s)
{
    /* indices 0-3 address the four 8x8 luma blocks of the macroblock,
     * 4 and 5 the two chroma blocks */
    s->block_index[0] = s->b8_stride * (s->mb_y * 2)         + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2)     + 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1)     + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) + 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1) +
                        s->b8_stride * s->mb_height * 2 + s->mb_x;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) +
                        s->b8_stride * s->mb_height * 2 + s->mb_x;
}
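/* Worked example (not from the source): for a QCIF-sized luma plane,
 * mb_width = 11 and mb_height = 9, so with the strides set up below
 * (mb_stride = mb_width + 1 = 12, b8_stride = 2 * mb_width + 1 = 23)
 * the macroblock at mb_x = 3, mb_y = 2 gets:
 *   block_index[0] = 23 * 4 + 6            = 98
 *   block_index[1] = 23 * 4 + 1 + 6        = 99
 *   block_index[2] = 23 * 5 + 6            = 121
 *   block_index[3] = 23 * 5 + 1 + 6        = 122
 *   block_index[4] = 12 * 3  + 23 * 18 + 3 = 453
 *   block_index[5] = 12 * 13 + 23 * 18 + 3 = 573
 */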
static int svq1_encode_plane(SVQ1EncContext *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int block_width, block_height;
    const int lambda = (s->quality * s->quality) >>
    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;
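    /* i.e. the plane is covered by ceil(width / 16) x ceil(height / 16)
     * macroblocks; for example a 176x144 plane gives (176 + 15) / 16 = 11
     * by (144 + 15) / 16 = 9 blocks (example values, not from the source). */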
        s->m.avctx                          = s->avctx;
        s->m.current_picture_ptr            = &s->m.current_picture;
        s->m.last_picture_ptr               = &s->m.last_picture;
        s->m.last_picture.f->data[0]        = ref_plane;
        s->m.last_picture.f->linesize[0]    =
        s->m.new_picture.f->linesize[0]     =
        s->m.current_picture.f->linesize[0] = stride;
        s->m.mb_width              = block_width;
        s->m.mb_height             = block_height;
        s->m.mb_stride             = s->m.mb_width + 1;
        s->m.b8_stride             = 2 * s->m.mb_width + 1;
        s->m.pict_type             = s->pict_type;
        s->m.motion_est            = s->motion_est;
        s->m.me.scene_change_score = 0;
        s->m.lambda                = s->quality;
        s->m.qscale                = s->m.lambda * 139 +
        s->m.lambda2               = s->m.lambda * s->m.lambda +
        if (!s->motion_val8[plane]) {
                                      block_height * 2 + 2) *
                                      2 * sizeof(int16_t));
                                      (block_height + 2) + 1) *
                                      2 * sizeof(int16_t));
            if (!s->motion_val8[plane] || !s->motion_val16[plane])
        s->m.mb_type = s->mb_type;

        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table                    = s->motion_val16[plane] +

        s->m.me.dia_size = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f->data[0] = src - y * 16 * stride;

            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                for (x = width; x < 16 * block_width; x++)
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)

            for (x = 0; x < block_width; x++) {

            s->m.first_slice_line = 0;
    s->m.first_slice_line = 1;
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
            for (x = width; x < 16 * block_width; x++)
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)

        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[2][6][7 * 32];
            int score[4] = { 0, 0, 0, 0 }, best;
            if (s->pb.buf_end - s->pb.buf -
                (s->m.mb_type[x + y * s->m.mb_stride] &
                for (i = 0; i < 6; i++)
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                score[0] = vlc[1] * lambda;

            for (i = 0; i < 6; i++) {
                int mx, my, pred_x, pred_y, dxy;

                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    for (i = 0; i < 6; i++)
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb = s->reorder_pb[5];
                    s->reorder_pb[5] = s->m.pb;

                    dxy = (mx & 1) + 2 * (my & 1);

                                             decoded, stride, 5, 64, lambda, 0);
                    best = score[1] <= score[0];

                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                for (i = 0; i < 6; i++) {

                motion_ptr[0 + 2 * s->m.b8_stride] =
                motion_ptr[1 + 2 * s->m.b8_stride] =
                motion_ptr[2 + 2 * s->m.b8_stride] =
                motion_ptr[3 + 2 * s->m.b8_stride] = 0;
            s->rd_total += score[best];
            for (i = 5; i >= 0; i--)

                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
            s->m.first_slice_line = 0;
    /* from svq1_encode_end(): per-plane cleanup loop */
    for (i = 0; i < 3; i++) {
    /* from svq1_encode_init() */
    if (avctx->width >= 4096 || avctx->height >= 4096) {

    if (!s->current_picture || !s->last_picture) {

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

                          s->y_block_height * sizeof(int16_t));
                          s->y_block_height * sizeof(int32_t));

    if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
        !s->m.me.score_map || !s->mb_type || !s->dummy) {
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)

    if (!s->current_picture->data[0]) {

    if (!s->last_picture->data[0]) {

    if (!s->scratchbuf) {
#if FF_API_CODED_FRAME
    for (i = 0; i < 3; i++) {
                                    s->last_picture->data[i],
                                    s->current_picture->data[i],
                                    s->frame_width  / (i ? 4 : 1),
                                    s->frame_height / (i ? 4 : 1),
                                    s->current_picture->linesize[i]);

            for (j = 0; j < i; j++) {
#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
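The listing above leans on the PutBitContext helpers documented in the cross-reference list below: each candidate coding mode is written into a scratch reorder_pb context, costed with put_bits_count(), and only the cheapest candidate is spliced into the real bitstream with ff_copy_bits(). A minimal sketch of that pattern, assuming the standard libavcodec put_bits.h API; the buffer size, bit values and the helper name emit_candidate are illustrative, not taken from the source:

#include "put_bits.h"

static void emit_candidate(PutBitContext *out)
{
    uint8_t scratch[7 * 32];           /* same size as one reorder buffer */
    PutBitContext tmp;
    int bits;

    init_put_bits(&tmp, scratch, sizeof(scratch));
    put_bits(&tmp, 3, 5);              /* some VLC code, 3 bits wide */
    put_bits(&tmp, 8, 0xA5);           /* more payload bits */

    bits = put_bits_count(&tmp);       /* cost of this candidate in bits */
    flush_put_bits(&tmp);              /* pad the scratch buffer to a byte boundary */

    ff_copy_bits(out, scratch, bits);  /* splice the winning bits into the stream */
}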
av_cold int ff_mpv_common_init(MpegEncContext *s)
Initialize the common structure for both encoder and decoder.
#define FF_ENABLE_DEPRECATION_WARNINGS
AVPixelFormat
Pixel format.
#define THRESHOLD_MULTIPLIER
static av_cold int init(AVCodecContext *avctx)
static int svq1_encode_plane(SVQ1EncContext *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane, int width, int height, int src_stride, int stride)
#define FFSWAP(type, a, b)
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
static int block_sum(const uint8_t *block, int w, int h, int linesize)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
const int8_t *const ff_svq1_intra_codebooks[6]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit value val n times.
#define CANDIDATE_MB_TYPE_INTER
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static const int8_t svq1_inter_codebook_sum[4][16 * 6]
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t ff_svq1_intra_multistage_vlc[6][8][2]
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
const uint16_t ff_svq1_inter_mean_vlc[512][2]
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
int key_frame
1 -> keyframe, 0 -> not
int flags
AV_CODEC_FLAG_*.
static void init_block_index(MpegEncContext *s)
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2, intptr_t size)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold int svq1_encode_init(AVCodecContext *avctx)
void ff_mpv_common_end(MpegEncContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
void ff_h263_encode_init(MpegEncContext *s)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_INPUT_BUFFER_MIN_SIZE
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
static av_cold int svq1_encode_end(AVCodecContext *avctx)
const int8_t *const ff_svq1_inter_codebooks[6]
enum AVPictureType pict_type
Picture type of the frame.
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
static char * split(char *message, char delim)
int flags
A combination of AV_PKT_FLAG values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static void svq1_write_header(SVQ1EncContext *s, int frame_type)
static int put_bits_count(PutBitContext *s)
void ff_svq1enc_init_x86(SVQ1EncContext *c)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
const char * name
Name of the codec implementation.
int ff_init_me(MpegEncContext *s)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static const int8_t svq1_intra_codebook_sum[4][16 * 6]
const uint8_t ff_svq1_inter_multistage_vlc[6][8][2]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
static const AVOption options[]
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
main external API structure.
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
const uint8_t ff_svq1_block_type_vlc[4][2]
const uint16_t ff_svq1_intra_mean_vlc[256][2]
#define CANDIDATE_MB_TYPE_INTRA
static int ref[MAX_W * MAX_W]
static float mean(const float *input, int size)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PICTURE_TYPE_P
Predicted.
int frame_number
Frame counter, set by libavcodec.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
The exact code depends on how similar the blocks are and how related they are to the block to be encoded.
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pict, int *got_packet)
av_cold void ff_svq1enc_init_ppc(SVQ1EncContext *c)
static const AVClass svq1enc_class
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
const uint16_t ff_svq1_frame_size_table[7][2]
#define QUALITY_THRESHOLD
static const unsigned codebook[256][2]
void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code)