   65     if (!s->context_initialized) {
   68         memcpy(s, s1, sizeof(*s));
   72         s->bitstream_buffer      = NULL;
   73         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
   75         if (s1->context_initialized) {
   80                 memset(s, 0, sizeof(*s));
   88     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
   89         s->height = s1->height;
   95     s->avctx->coded_height  = s1->avctx->coded_height;
   96     s->avctx->coded_width   = s1->avctx->coded_width;
   97     s->avctx->width         = s1->avctx->width;
   98     s->avctx->height        = s1->avctx->height;
  100     s->quarter_sample       = s1->quarter_sample;
  102     s->coded_picture_number = s1->coded_picture_number;
  103     s->picture_number       = s1->picture_number;
  109             if (s1->picture && s1->picture[i].f->buf[0] &&
 
  114 #define UPDATE_PICTURE(pic)\ 
  116     ff_mpeg_unref_picture(s->avctx, &s->pic);\ 
  117     if (s1->pic.f && s1->pic.f->buf[0])\ 
  118         ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\ 
  120         ret = ff_update_picture_tables(&s->pic, &s1->pic);\ 
  129 #define REBASE_PICTURE(pic, new_ctx, old_ctx)                                 \ 
  130     ((pic && pic >= old_ctx->picture &&                                       \ 
  131       pic < old_ctx->picture + MAX_PICTURE_COUNT) ?                           \ 
  132         &new_ctx->picture[pic - old_ctx->picture] : NULL) 
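REBASE_PICTURE maps a Picture pointer that points into one context's picture[] array onto the element with the same index in another context's array, and yields NULL for anything that does not point into the array; this is what the thread-update path needs after copying a whole context by value. A minimal sketch of the same idea as a function, using a made-up Ctx struct and an illustrative MAX_PICTURE_COUNT:

    #include <stddef.h>

    #define MAX_PICTURE_COUNT 36           /* illustrative; must match the array size */

    typedef struct Picture { int dummy; } Picture;

    typedef struct Ctx {
        Picture picture[MAX_PICTURE_COUNT];
    } Ctx;

    /* Return the picture in new_ctx at the same index as *pic has in old_ctx,
     * or NULL if pic does not point into old_ctx->picture[]. */
    static Picture *rebase_picture(Picture *pic, Ctx *new_ctx, Ctx *old_ctx)
    {
        if (pic && pic >= old_ctx->picture &&
            pic <  old_ctx->picture + MAX_PICTURE_COUNT)
            return &new_ctx->picture[pic - old_ctx->picture];
        return NULL;
    }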
  139     s->workaround_bugs      = s1->workaround_bugs;
  140     s->padding_bug_score    = s1->padding_bug_score;
  143     memcpy(&s->last_time_base, &s1->last_time_base,
  144            (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
  145            (char *) &s1->last_time_base);
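The memcpy above copies every member from last_time_base up to and including pb_field_time in a single call, computing the length from the address of the first member and the end of the last one; this is only safe because the members in that span are plain values with no owned pointers. A sketch of the pattern with a hypothetical struct:

    #include <string.h>

    /* Hypothetical context, for illustration only. */
    typedef struct DemoContext {
        int   keep_this;        /* not copied */
        int   last_time_base;   /* first member of the copied span */
        int   time_base_num;
        int   pp_time;
        int   pb_field_time;    /* last member of the copied span */
        void *private_ptr;      /* not copied: per-context pointers must not be cloned */
    } DemoContext;

    static void copy_timing_span(DemoContext *dst, const DemoContext *src)
    {
        /* Length = end of pb_field_time minus start of last_time_base. */
        memcpy(&dst->last_time_base, &src->last_time_base,
               (char *) &src->pb_field_time + sizeof(src->pb_field_time) -
               (char *) &src->last_time_base);
    }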
 
  148     s->max_b_frames = s1->max_b_frames;
  149     s->low_delay    = s1->low_delay;
  150     s->droppable    = s1->droppable;
  153     s->divx_packed  = s1->divx_packed;
 
  155     if (s1->bitstream_buffer) {
  156         if (s1->bitstream_buffer_size +
  159                            &s->allocated_bitstream_buffer_size,
  160                            s1->allocated_bitstream_buffer_size);
  161             if (!s->bitstream_buffer) {
  162                 s->bitstream_buffer_size = 0;
  166         s->bitstream_buffer_size = s1->bitstream_buffer_size;
  167         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
  168                s1->bitstream_buffer_size);
  169         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
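The bitstream-buffer handling above is the usual av_fast_malloc() pattern: grow the destination only when it is too small, treat a NULL pointer afterwards as allocation failure, then copy the data and zero AV_INPUT_BUFFER_PADDING_SIZE bytes past the end so the bitstream reader may safely overread. A self-contained sketch of that pattern (function and parameter names are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>
    #include <libavcodec/avcodec.h>   /* AV_INPUT_BUFFER_PADDING_SIZE */

    static int copy_bitstream(uint8_t **dst, unsigned int *dst_alloc_size,
                              const uint8_t *src, unsigned int src_size)
    {
        /* Reuse *dst if it is already big enough, otherwise reallocate. */
        av_fast_malloc(dst, dst_alloc_size,
                       (size_t)src_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!*dst)
            return AVERROR(ENOMEM);

        memcpy(*dst, src, src_size);
        /* Zero the padding so the reader can read past the end of the data. */
        memset(*dst + src_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        return 0;
    }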
 
  174     if (!s->sc.edge_emu_buffer)
  177                                         &s->sc, s1->linesize) < 0) {
  179                        "scratch buffers.\n");
  184                    "be allocated due to unknown size.\n");
 
  188     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
  189            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
 
  198     if (!s->context_initialized)
  205             s->picture[i].needs_realloc = 1;
 
  207     s->last_picture_ptr         =
  208     s->next_picture_ptr         =
  209     s->current_picture_ptr      = NULL;
  213         s->mb_height = (s->height + 31) / 32 * 2;
  215         s->mb_height = (s->height + 15) / 16;
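Lines 213 and 215 are the two rounding rules for the number of macroblock rows: interlaced MPEG-2 is laid out in 32-line macroblock pairs, everything else in 16-line macroblocks (the condition selecting between them is not part of this excerpt). As a small illustration:

    /* Macroblock rows for a given frame height.
     * interlaced_mpeg2: (height + 31) / 32 * 2   e.g. 1080 -> 68, 16 -> 2
     * otherwise:        (height + 15) / 16       e.g. 1080 -> 68, 16 -> 1 */
    static int mb_rows(int height, int interlaced_mpeg2)
    {
        if (interlaced_mpeg2)
            return (height + 31) / 32 * 2;
        return (height + 15) / 16;
    }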
 
  217     if ((s->width || s->height) &&
  231     memset(s->thread_context, 0, sizeof(s->thread_context));
  232     s->thread_context[0]   = s;
  234     if (s->width && s->height) {
  239     s->context_reinit = 0;
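Clearing thread_context[] and pointing slot 0 back at the context itself is the slice-threading setup: slot 0 is the main context, and the remaining slots are filled later with per-thread duplicates (see ff_mpv_init_duplicate_contexts). A rough sketch of that ownership pattern with a hypothetical worker struct (error-path cleanup omitted for brevity):

    #include <stdlib.h>
    #include <string.h>

    #define MAX_THREADS 16                    /* illustrative bound */

    typedef struct WorkerCtx {
        int slice_index;
        struct WorkerCtx *thread_context[MAX_THREADS];
    } WorkerCtx;

    static int init_thread_contexts(WorkerCtx *s, int nb_threads)
    {
        memset(s->thread_context, 0, sizeof(s->thread_context));
        s->thread_context[0] = s;             /* slot 0 aliases the main context */

        for (int i = 1; i < nb_threads && i < MAX_THREADS; i++) {
            WorkerCtx *c = malloc(sizeof(*c));
            if (!c)
                return -1;
            *c = *s;                          /* start from a copy of the main context */
            c->slice_index = i;               /* then give it its own per-thread state */
            s->thread_context[i] = c;
        }
        return 0;
    }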
 
  244     s->context_reinit = 1;
 
  251                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
  252                             s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
  253                             &s->linesize, &s->uvlinesize);
 
  258     int h_chroma_shift, v_chroma_shift;
 
  262     for (int i = 0; i < frame->height; i++)
 
  290         s->last_picture_ptr != s->next_picture_ptr &&
  291         s->last_picture_ptr->f->buf[0]) {
  298         if (&s->picture[i] != s->last_picture_ptr &&
  299             &s->picture[i] != s->next_picture_ptr &&
  300             s->picture[i].reference && !s->picture[i].needs_realloc) {
  311         if (!s->picture[i].reference)
 
  315     if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
  318         pic = s->current_picture_ptr;
  325         pic = &s->picture[idx];
 
  339     s->current_picture_ptr = pic;
 
  341     s->current_picture_ptr->f->top_field_first = s->top_field_first;
  345             s->current_picture_ptr->f->top_field_first =
  348     s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
  349                                                  !s->progressive_sequence;
  350     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
  352     s->current_picture_ptr->f->pict_type = s->pict_type;
 
  356                                    s->current_picture_ptr)) < 0)
 
  360         s->last_picture_ptr = s->next_picture_ptr;
  362             s->next_picture_ptr = s->current_picture_ptr;
 
  364     ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
  365             s->last_picture_ptr, s->next_picture_ptr, s->current_picture_ptr,
  366             s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
  367             s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
  368             s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
  369             s->pict_type, s->droppable);
 
  371     if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
  373         int h_chroma_shift, v_chroma_shift;
  375                                          &h_chroma_shift, &v_chroma_shift);
  378                    "allocating dummy last picture for B frame\n");
  381                    "warning: first frame is no keyframe\n");
 
  389         s->last_picture_ptr = &s->picture[idx];
  391         s->last_picture_ptr->reference    = 3;
  392         s->last_picture_ptr->f->key_frame = 0;
  396             s->last_picture_ptr = NULL;
  402                 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
  404             if (s->last_picture_ptr->f->data[2]) {
  406                     memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
  408                     memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
  415                     memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0] * i,
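When a dummy reference picture has to be fabricated (for instance a B-frame arrives before any intra frame), its planes are filled with a neutral value so that concealment never reads uninitialized memory; that is what the memsets above and the gray_frame() helper do. A standalone sketch using only public AVFrame fields (the flat 0x80 fill value is an assumption; the real code chooses its values differently):

    #include <string.h>
    #include <libavutil/common.h>     /* AV_CEIL_RSHIFT */
    #include <libavutil/frame.h>
    #include <libavutil/pixdesc.h>    /* av_pix_fmt_get_chroma_sub_sample */

    static void fill_gray(AVFrame *frame)
    {
        int h_shift, v_shift;

        if (av_pix_fmt_get_chroma_sub_sample(frame->format, &h_shift, &v_shift) < 0)
            return;

        /* Luma plane, one row at a time because linesize >= width. */
        for (int y = 0; y < frame->height; y++)
            memset(frame->data[0] + y * frame->linesize[0], 0x80, frame->width);

        /* Chroma planes, if the format is planar YUV. */
        if (frame->data[1] && frame->data[2]) {
            int cw = AV_CEIL_RSHIFT(frame->width,  h_shift);
            int ch = AV_CEIL_RSHIFT(frame->height, v_shift);
            for (int y = 0; y < ch; y++) {
                memset(frame->data[1] + y * frame->linesize[1], 0x80, cw);
                memset(frame->data[2] + y * frame->linesize[2], 0x80, cw);
            }
        }
    }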
 
  423     if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
  431         s->next_picture_ptr = &s->picture[idx];
  433         s->next_picture_ptr->reference   = 3;
  434         s->next_picture_ptr->f->key_frame = 0;
  438             s->next_picture_ptr = NULL;
 
  445 #if 0 // BUFREF-FIXME
  446     memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
  447     memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
 
  449     if (s->last_picture_ptr) {
  450         if (s->last_picture_ptr->f->buf[0] &&
  452                                        s->last_picture_ptr)) < 0)
  455     if (s->next_picture_ptr) {
  456         if (s->next_picture_ptr->f->buf[0] &&
  458                                        s->next_picture_ptr)) < 0)
  463                                                  s->last_picture_ptr->f->buf[0]));
 
  466         for (int i = 0; i < 4; i++) {
  468                 s->current_picture.f->data[i] +=
  469                     s->current_picture.f->linesize[i];
  471             s->current_picture.f->linesize[i] *= 2;
  472             s->last_picture.f->linesize[i]    *= 2;
  473             s->next_picture.f->linesize[i]    *= 2;
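The loop above (lines 466-473) is the standard trick for addressing one field of an interlaced frame in place: advance the plane pointers by one line for the bottom field and double every linesize so consecutive rows skip the lines belonging to the other field. A minimal sketch of just the pointer arithmetic (it deliberately does not take a new reference on the frame):

    #include <libavutil/frame.h>

    static void select_field(AVFrame *frame, int bottom_field)
    {
        for (int i = 0; i < 4; i++) {
            if (!frame->data[i])
                continue;
            if (bottom_field)
                frame->data[i] += frame->linesize[i];   /* start on the second line */
            frame->linesize[i] *= 2;                    /* step over the other field */
        }
    }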
 
  481         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  482         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  484         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  485         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  487         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  488         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
 
  502     if (s->current_picture.reference)
  510                          s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
 
  546                        s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
  547                        y, h, s->picture_structure,
  548                        s->first_field, s->low_delay);
 
  560     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  566     s->mb_x = s->mb_y = 0;
 
  568 #if FF_API_FLAG_TRUNCATED 
  569     s->parse_context.state = -1;
 
  570     s->parse_context.frame_start_found = 0;
 
  571     s->parse_context.overread = 0;
 
  572     s->parse_context.overread_index = 0;
 
  573     s->parse_context.index = 0;
 
  574     s->parse_context.last_index = 0;
 
  576     s->bitstream_buffer_size = 0;
 
  
av_cold int ff_mpv_common_init(MpegEncContext *s)
Initialize the common structure for both encoder and decoder.
 
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
 
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
 
 
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
 
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocate MpegEncContext fields dependent on the resolution.
 
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
 
int ff_thread_can_start_frame(AVCodecContext *avctx)
 
int coded_picture_number
picture number in bitstream order
 
int alloc_mb_width
mb_width used to allocate tables
 
Picture current_picture
copy of the current picture structure.
 
This structure describes decoded (raw) audio or video data.
 
void ff_mpv_report_decode_progress(MpegEncContext *s)
 
enum AVPictureType last_picture
 
Picture next_picture
copy of the next picture structure.
 
unsigned int ff_toupper4(unsigned int x)
 
#define PICT_BOTTOM_FIELD
 
struct AVCodecContext * avctx
 
@ AV_VIDEO_ENC_PARAMS_MPEG2
 
static void gray_frame(AVFrame *frame)
 
const struct AVCodec * codec
 
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w and log2_chroma_h from the pixel format's AVPixFmtDescriptor.
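For reference, a tiny usage example: with AV_PIX_FMT_YUV420P both chroma dimensions are halved, so both shifts come back as 1.

    #include <stdio.h>
    #include <libavutil/pixdesc.h>
    #include <libavutil/pixfmt.h>

    int main(void)
    {
        int h_shift, v_shift;

        if (av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift) < 0)
            return 1;
        printf("log2 chroma shift: %d x %d\n", h_shift, v_shift);   /* prints 1 x 1 */
        return 0;
    }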
 
Video encoding parameters for a given frame.
 
#define MAX_PICTURE_COUNT
 
static int16_t mult(Float11 *f1, Float11 *f2)
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
 
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
 
int ff_mpv_common_frame_size_change(MpegEncContext *s)
 
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture; frees the picture tables in case they need to be reallocated anyway.
 
#define AV_CEIL_RSHIFT(a, b)
 
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
 
#define av_assert0(cond)
assert() equivalent, that is always enabled.
 
#define UPDATE_PICTURE(pic)
 
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
 
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
 
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
 
av_cold void ff_mpv_idct_init(MpegEncContext *s)
 
@ AV_PICTURE_TYPE_I
Intra.
 
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
 
#define FF_MPV_QSCALE_TYPE_MPEG1
 
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of nb_blocks AVVideoBlockParams and initializes the variables.
 
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
 
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
 
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
 
int alloc_mb_height
mb_height used to allocate tables
 
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
Generic function called after decoding the header and before a frame is decoded.
 
void ff_mpeg_flush(AVCodecContext *avctx)
 
int16_t(*[2] motion_val)[2]
 
#define i(width, name, range_min, range_max)
 
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
 
Data structure for storing block-level encoding information.
 
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
 
 
static int alloc_picture(MpegEncContext *s, Picture *pic)
 
#define AV_INPUT_BUFFER_PADDING_SIZE
 
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
 
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution, as well as the slice thread contexts.
 
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
 
main external API structure.
 
uint32_t * mb_type
types and macros are defined in mpegutils.h
 
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
 
void ff_mpv_frame_end(MpegEncContext *s)
 
int coded_width
Bitstream width / height; may be different from width/height, e.g. when the decoded frame is cropped before being output or lowres is enabled.
 
@ AV_PICTURE_TYPE_P
Predicted.
 
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
 
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
 
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
 
int width
picture width / height.
 
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
 
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
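A short usage note: it returns 0 when the dimensions are usable and a negative AVERROR otherwise, so it is typically called before allocating image buffers, e.g.:

    #include <libavutil/imgutils.h>

    /* Returns 1 when w x h describes an addressable image, 0 otherwise
     * (e.g. 0x0 or absurdly large sizes are rejected). */
    static int dimensions_ok(unsigned w, unsigned h)
    {
        return av_image_check_size(w, h, 0, NULL) >= 0;
    }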
 
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
 
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
 
int alloc_mb_stride
mb_stride used to allocate tables