Go to the documentation of this file.
/* Flatten a (last, run, level) triple into a linear index for run/level
 * length tables such as uni_h261_rl_len[64*64*2*2]: `last` selects a
 * 128*64-entry half, `run` a 128-entry row, `level` the column within it.
 * NOTE(review): assumes 0 <= level < 128 and 0 <= run < 64 — confirm at
 * the call sites that fill/read the table. */
38 #define UNI_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level))
65 temp_ref =
s->picture_number * 30000LL *
s->avctx->time_base.num /
66 (1001LL *
s->avctx->time_base.den);
104 s->last_mv[0][0][0] = 0;
105 s->last_mv[0][0][1] = 0;
110 int index =
s->mb_x +
s->mb_y *
s->mb_width;
112 if (
index % 11 == 0) {
115 s->last_mv[0][0][0] = 0;
116 s->last_mv[0][0][1] = 0;
126 s->mb_x += 11 * (
index % 2);
158 for (
i = 0;
i < 6;
i++)
159 if (
s->block_last_index[
i] >= 0)
172 int level,
run,
i, j, last_index, last_non_zero, sign, slevel,
code;
185 else if (
level < 1) {
194 }
else if ((
block[0] == 1 ||
block[0] == -1) &&
195 (
s->block_last_index[
n] > -1)) {
204 last_index =
s->block_last_index[
n];
205 last_non_zero =
i - 1;
206 for (;
i <= last_index;
i++) {
207 j =
s->intra_scantable.permutated[
i];
210 run =
i - last_non_zero - 1;
238 int motion_x,
int motion_y)
241 int mvd, mv_diff_x, mv_diff_y,
i, cbp;
252 mvd = motion_x | motion_y;
254 if ((cbp | mvd) == 0) {
258 s->last_mv[0][0][0] = 0;
259 s->last_mv[0][0][1] = 0;
260 s->qscale -=
s->dquant;
275 if (mvd ||
s->loop_filter)
284 if (
s->dquant && cbp) {
287 s->qscale -=
s->dquant;
301 mv_diff_x = (motion_x >> 1) -
s->last_mv[0][0][0];
302 mv_diff_y = (motion_y >> 1) -
s->last_mv[0][0][1];
303 s->last_mv[0][0][0] = (motion_x >> 1);
304 s->last_mv[0][0][1] = (motion_y >> 1);
315 for (
i = 0;
i < 6;
i++)
320 s->last_mv[0][0][0] = 0;
321 s->last_mv[0][0][1] = 0;
328 int slevel,
run, last;
333 for(slevel=-64; slevel<64; slevel++){
334 if(slevel==0)
continue;
336 for(last=0; last<=1; last++){
338 int level= slevel < 0 ? -slevel : slevel;
369 s->min_qcoeff = -127;
371 s->y_dc_scale_table =
373 s->ac_esc_length = 6+6+8;
378 s->intra_ac_vlc_last_length =
s->inter_ac_vlc_last_length =
uni_h261_rl_len + 128*64;
static av_cold void init_uni_h261_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab)
AVPixelFormat
Pixel format.
static av_cold int init(AVCodecContext *avctx)
Filter frames: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. The lists are not just lists: they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references: ownership and permissions.
const uint8_t ff_mpeg1_dc_scale_table[128]
static void put_sbits(PutBitContext *pb, int n, int32_t value)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val, n times.
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
void ff_init_block_index(MpegEncContext *s)
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
const int ff_h261_mtype_map[10]
int n
number of entries of table_vlc minus 1
static const AVClass h261_class
int ff_mpv_encode_init(AVCodecContext *avctx)
Filter frames: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists.
static int get_rl_index(const RLTable *rl, int last, int run, int level)
static void h261_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
const uint8_t ff_h261_mba_code[35]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
static enum AVPixelFormat pix_fmts[]
const uint8_t ff_h261_mv_tab[17][2]
static void h261_encode_block(H261Context *h, int16_t *block, int n)
Encode an 8x8 block.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
const uint16_t(* table_vlc)[2]
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
av_cold void ff_h261_common_init(void)
const char const char void * val
const AVOption ff_mpv_generic_options[]
static void ff_update_block_index(MpegEncContext *s)
const uint8_t ff_h261_cbp_tab[63][2]
#define i(width, name, range_min, range_max)
To forward the status: test the status of the outputs and forward it to the corresponding inputs, then return FFERROR_NOT_READY. If the filter stores internally one or a few frames for some input, it can consider them to be part of the FIFO and delay acknowledging a status change accordingly. Example code:
static void h261_encode_motion(H261Context *h, int val)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int ff_mpv_encode_end(AVCodecContext *avctx)
void ff_h261_reorder_mb_index(MpegEncContext *s)
const uint8_t ff_h261_mtype_code[10]
int ff_h261_get_picture_format(int width, int height)
const char * name
Name of the codec implementation.
static int get_cbp(MpegEncContext *s, int16_t block[6][64])
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
RLTable ff_h261_rl_tcoeff
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
const uint8_t ff_h261_mtype_bits[10]
av_cold void ff_h261_encode_init(MpegEncContext *s)
The exact code depends on how similar the blocks are and how related they are to the block
static uint8_t uni_h261_rl_len[64 *64 *2 *2]
const uint8_t ff_h261_mba_bits[35]
#define UNI_ENC_INDEX(last, run, level)