FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_dec.c
Go to the documentation of this file.
1 /*
2  * Common mpeg video decoding code
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include <limits.h>
24 
25 #include "config_components.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/emms.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
32 
33 #include "avcodec.h"
34 #include "decode.h"
35 #include "h263.h"
36 #include "h264chroma.h"
37 #include "internal.h"
38 #include "mpegutils.h"
39 #include "mpegvideo.h"
40 #include "mpegvideodec.h"
41 #include "mpeg4videodec.h"
42 #include "libavutil/refstruct.h"
43 #include "thread.h"
44 #include "threadprogress.h"
45 #include "wmv2dec.h"
46 
/* NOTE(review): the signature line was lost in extraction; judging from the body
 * this is the decoder init entry point taking (MpegEncContext *s,
 * AVCodecContext *avctx) — confirm against upstream. A setup call on original
 * line 51 and one on line 62 are also missing from this dump. */
{
    enum ThreadingStatus thread_status;

    /* Mirror the basic stream parameters from the codec context. */
    s->avctx = avctx;
    s->width = avctx->coded_width;
    s->height = avctx->coded_height;
    s->codec_id = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag = ff_toupper4(avctx->codec_tag);

    ff_h264chroma_init(&s->h264chroma, 8); //for lowres

    if (s->picture_pool) // VC-1 can call this multiple times
        return 0;

    /* Share one picture pool across frame-thread copies: only the first
     * (non-copy) context allocates it. */
    thread_status = ff_thread_sync_ref(avctx, offsetof(MpegEncContext, picture_pool));
    if (thread_status != FF_THREAD_IS_COPY) {
        s->picture_pool = ff_mpv_alloc_pic_pool(thread_status != FF_THREAD_NO_FRAME_THREADING);
        if (!s->picture_pool)
            return AVERROR(ENOMEM);
    }
    return 0;
}
77 
/* NOTE(review): the first line of the signature (function name and the
 * `AVCodecContext *dst` parameter) was lost in extraction. This is the
 * frame-threading context-update hook: copies decoding state from the
 * source thread's context (src/s1) into the destination (dst/s). */
                                  const AVCodecContext *src)
{
    MpegEncContext *const s1 = src->priv_data;
    MpegEncContext *const s = dst->priv_data;
    int ret;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        /* First update of a fresh copy: clone the whole context, then
         * restore the fields that must stay per-thread. */
        void *private_ctx = s->private_ctx;
        int err;
        memcpy(s, s1, sizeof(*s));

        s->context_initialized = 0;
        s->context_reinit = 0;
        s->avctx = dst;
        s->private_ctx = private_ctx;

        if (s1->context_initialized) {
            if ((err = ff_mpv_common_init(s)) < 0)
                return err;
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        s->width = s1->width;
        /* NOTE(review): the call that assigns `ret` here (presumably the
         * frame-size-change helper) was lost in extraction; as dumped,
         * `ret` is returned uninitialized. */
            return ret;
    }

    s->quarter_sample = s1->quarter_sample;

    s->picture_number = s1->picture_number;

    /* Take references to the source thread's current/last/next pictures. */
    ff_mpv_replace_picture(&s->cur_pic, &s1->cur_pic);
    ff_mpv_replace_picture(&s->last_pic, &s1->last_pic);
    ff_mpv_replace_picture(&s->next_pic, &s1->next_pic);

    s->linesize = s1->linesize;
    s->uvlinesize = s1->uvlinesize;

    // Error/bug resilience
    s->workaround_bugs = s1->workaround_bugs;
    s->padding_bug_score = s1->padding_bug_score;

    // MPEG-4 timing info
    /* Bulk-copies the struct span [last_time_base, pb_field_time] — relies on
     * those members being laid out contiguously in MpegEncContext. */
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->low_delay = s1->low_delay;

    // MPEG-2/interlacing info
    /* Same span-copy trick for [progressive_sequence, first_field]. */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->first_field + sizeof(s1->first_field) - (char *) &s1->progressive_sequence);

    return 0;
}
144 
/* NOTE(review): signature line lost in extraction — presumably the decoder
 * close callback taking an AVCodecContext. A statement on original line 150
 * (likely the common-teardown call) is also missing from this dump. */
{
    /* Release the (possibly thread-shared) picture pool reference. */
    av_refstruct_pool_uninit(&s->picture_pool);
    return 0;
}
153 
/* NOTE(review): signature line lost in extraction — this is the
 * frame-size-change re-initialization path (called when width/height change
 * mid-stream). Original lines 161-162, 185-186 and 193 are also missing:
 * they freed the old per-frame context, ran the per-thread duplicate-context
 * init, and freed on the failure path, respectively — confirm upstream. */
{
    int err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    /* Drop all picture references before reallocating frame-sized buffers. */
    ff_mpv_unref_picture(&s->last_pic);
    ff_mpv_unref_picture(&s->next_pic);
    ff_mpv_unref_picture(&s->cur_pic);

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        goto fail;

    if ((err = ff_mpv_init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        /* NOTE(review): the call assigning `err` here was lost in extraction. */
        if (err < 0)
            goto fail;
    }
    s->context_reinit = 0;

    return 0;
 fail:
    /* Remember that a reinit is still pending so the next call retries. */
    s->context_reinit = 1;
    return err;
}
197 
/* Allocate an MPVPicture from the pool into *dst and get its frame buffer.
 * `reference` marks the picture as a reference frame (kept for prediction).
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): original lines 213-216, 222, 231 and 246 are missing from
 * this dump — they contained the codec-id check plus the thread get_buffer
 * call, the default get_buffer fallback, the hwaccel private allocation, and
 * the failure-path unref. The dangling `reference ? ...` line below is the
 * tail of the lost get_buffer call. */
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
{
    AVCodecContext *avctx = s->avctx;
    MPVPicture *pic = av_refstruct_pool_get(s->picture_pool);
    int ret;

    if (!pic)
        return AVERROR(ENOMEM);

    dst->ptr = pic;

    pic->reference = reference;

    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
                          reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f->width = avctx->width;
        pic->f->height = avctx->height;
        pic->f->format = avctx->pix_fmt;
    }
    if (ret < 0)
        goto fail;

    /* Keep the context's linesizes consistent with the new buffer. */
    ret = ff_mpv_pic_check_linesize(avctx, pic->f, &s->linesize, &s->uvlinesize);
    if (ret < 0)
        goto fail;

    if (ret < 0)
        goto fail;

    /* The buffer pools must have been sized for the current mb geometry. */
    av_assert1(s->mb_width == s->buffer_pools.alloc_mb_width);
    av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height ||
               FFALIGN(s->mb_height, 2) == s->buffer_pools.alloc_mb_height);
    av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
    ret = ff_mpv_alloc_pic_accessories(s->avctx, dst, &s->sc,
                                       &s->buffer_pools, s->mb_height);
    if (ret < 0)
        goto fail;

    return 0;
fail:
    return ret;
}
249 
/* NOTE(review): signature line lost in extraction — presumably
 * static int alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst).
 * Allocates a placeholder reference picture (used when a real reference is
 * missing) and immediately marks it fully decoded so waiters never block. */
{
    MPVPicture *pic;
    int ret = alloc_picture(s, dst, 1);
    if (ret < 0)
        return ret;

    pic = dst->ptr;
    pic->dummy = 1;

    /* INT_MAX progress: every row is "done" — nothing will ever decode it. */
    ff_thread_progress_report(&pic->progress, INT_MAX);

    return 0;
}
264 
265 static void color_frame(AVFrame *frame, int luma)
266 {
267  int h_chroma_shift, v_chroma_shift;
268 
269  for (int i = 0; i < frame->height; i++)
270  memset(frame->data[0] + frame->linesize[0] * i, luma, frame->width);
271 
272  if (!frame->data[1])
273  return;
274  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
275  for (int i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
276  memset(frame->data[1] + frame->linesize[1] * i,
277  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
278  memset(frame->data[2] + frame->linesize[2] * i,
279  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
280  }
281 }
282 
/* NOTE(review): signature line lost in extraction. Ensures last/next
 * reference pictures exist before decoding a predicted frame, substituting
 * gray dummy frames when the stream starts on a non-keyframe. Original lines
 * 292 and 296 (the av_log(...) heads for the two dangling message strings
 * below) are missing from this dump. */
{
    AVCodecContext *avctx = s->avctx;
    int ret;

    av_assert1(!s->last_pic.ptr || s->last_pic.ptr->f->buf[0]);
    av_assert1(!s->next_pic.ptr || s->next_pic.ptr->f->buf[0]);
    if (!s->last_pic.ptr && s->pict_type != AV_PICTURE_TYPE_I) {
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_pic.ptr)
                   "allocating dummy last picture for B frame\n");
        else if (s->codec_id != AV_CODEC_ID_H261 /* H.261 has no keyframes */ &&
                 (s->picture_structure == PICT_FRAME || s->first_field))
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        ret = alloc_dummy_frame(s, &s->last_pic);
        if (ret < 0)
            return ret;

        if (!avctx->hwaccel) {
            /* FLV1/H.263 use luma 16 for "black"; others use mid-gray. */
            int luma_val = s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263 ? 16 : 0x80;
            color_frame(s->last_pic.ptr->f, luma_val);
        }
    }
    if (!s->next_pic.ptr && s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        ret = alloc_dummy_frame(s, &s->next_pic);
        if (ret < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_pic.ptr &&
                                                     s->last_pic.ptr->f->buf[0]));

    return 0;
}
321 
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 */
/* NOTE(review): the signature line (original 326) was lost in extraction, as
 * were the `if (...)` condition opening the SETUP-state check (line 332) and
 * the call assigning `ret` before the second error check (line 366 — likely
 * the dummy-frame allocation). Confirm against upstream. */
{
    int ret;

    s->mb_skipped = 0;

        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return AVERROR_BUG;
    }

    /* Allocate the new current picture; B-frames and droppable frames are
     * not kept as references. */
    ff_mpv_unref_picture(&s->cur_pic);
    ret = alloc_picture(s, &s->cur_pic,
                        s->pict_type != AV_PICTURE_TYPE_B && !s->droppable);
    if (ret < 0)
        return ret;

    s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST * !!s->top_field_first;
    s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_INTERLACED *
                                (!s->progressive_frame && !s->progressive_sequence);
    s->cur_pic.ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->cur_pic.ptr->f->pict_type = s->pict_type;
    if (s->pict_type == AV_PICTURE_TYPE_I)
        s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
    else
        s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        /* Rotate references: old next becomes last; current becomes next
         * (unless droppable). */
        ff_mpv_workpic_from_pic(&s->last_pic, s->next_pic.ptr);
        if (!s->droppable)
            ff_mpv_workpic_from_pic(&s->next_pic, s->cur_pic.ptr);
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            (void*)s->last_pic.ptr, (void*)s->next_pic.ptr, (void*)s->cur_pic.ptr,
            s->last_pic.ptr ? s->last_pic.ptr->f->data[0] : NULL,
            s->next_pic.ptr ? s->next_pic.ptr->f->data[0] : NULL,
            s->cur_pic.ptr ? s->cur_pic.ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if (ret < 0)
        return ret;

    /* Debug aid: blank the frame so only decoded residue is visible. */
    if (s->avctx->debug & FF_DEBUG_NOMC)
        color_frame(s->cur_pic.ptr->f, 0x80);

    return 0;
}
375 
/* called after a frame has been decoded. */
/* NOTE(review): signature line lost in extraction. Clears the MMX/FP state
 * and, for reference frames, marks decoding fully complete so frame-threaded
 * consumers stop waiting. */
{
    emms_c();

    if (s->cur_pic.reference)
        ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
}
384 
/* NOTE(review): signature line lost in extraction — a thin wrapper that
 * forwards the picture's per-MB debug tables to ff_print_debug_info2(). */
{
    ff_print_debug_info2(s->avctx, pict, p->mb_type,
                         p->qscale_table, p->motion_val,
                         p->mb_width, p->mb_height, p->mb_stride, s->quarter_sample);
}
391 
/* NOTE(review): the first line of the signature was lost in extraction; this
 * exports the per-macroblock quantizer table as AVVideoEncParams side data.
 * The line assigning `par` (original 402 — presumably
 * av_video_enc_params_create_side_data(...)) is also missing from this dump. */
                           const MPVPicture *p, int qp_type)
{
    AVVideoEncParams *par;
    /* MPEG-1-style qscale is stored at half resolution — scale up by 2. */
    int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;
    unsigned int nb_mb = p->mb_height * p->mb_width;

    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
        return 0;

    if (!par)
        return AVERROR(ENOMEM);

    /* One 16x16 block entry per macroblock, row-major. */
    for (unsigned y = 0; y < p->mb_height; y++)
        for (unsigned x = 0; x < p->mb_width; x++) {
            const unsigned int block_idx = y * p->mb_width + x;
            const unsigned int mb_xy = y * p->mb_stride + x;
            AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);

            b->src_x = x * 16;
            b->src_y = y * 16;
            b->w = 16;
            b->h = 16;

            b->delta_qp = p->qscale_table[mb_xy] * mult;
        }

    return 0;
}
422 
/* NOTE(review): signature line lost in extraction — forwards a decoded band
 * of `h` rows starting at `y` to the user's draw_horiz_band callback. */
{
    ff_draw_horiz_band(s->avctx, s->cur_pic.ptr->f,
                       s->last_pic.ptr ? s->last_pic.ptr->f : NULL,
                       y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
430 
/* NOTE(review): signature line lost in extraction — the codec flush callback:
 * drops all picture references and resets positional/timing state. */
{
    MpegEncContext *const s = avctx->priv_data;

    ff_mpv_unref_picture(&s->cur_pic);
    ff_mpv_unref_picture(&s->last_pic);
    ff_mpv_unref_picture(&s->next_pic);

    s->mb_x = s->mb_y = 0;

    s->pp_time = 0;
}
443 
/* NOTE(review): signature line lost in extraction — reports row-level decode
 * progress for frame threading. Skipped for B-frames, data-partitioned frames
 * and after errors, where rows may not complete strictly top-to-bottom. */
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
        ff_thread_progress_report(&s->cur_pic.ptr->progress, s->mb_y);
}
449 
450 
/* NOTE(review): the first signature line (function name, MpegEncContext *s)
 * was lost in extraction; this is the low-resolution half-pel motion
 * compensation helper for a single plane. The trailing arguments of the
 * emulated_edge_mc call (original line 486 — the edge positions) are also
 * missing. Returns nonzero if the edge-emulation buffer was used. */
                               uint8_t *dest, const uint8_t *src,
                               int field_based, int field_select,
                               int src_x, int src_y,
                               int width, int height, ptrdiff_t stride,
                               int h_edge_pos, int v_edge_pos,
                               int w, int h, const h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y)
{
    const int lowres = s->avctx->lowres;
    const int op_index = lowres;
    /* Sub-pel mask: keeps 1 fractional bit + lowres extra bits. */
    const int s_mask = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    av_assert2(op_index <= 3);

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* >> lowres + 1 parses as >> (lowres + 1): drop fractional bits. */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y * (1 << field_based),
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    /* Rescale sub-pel offsets to the chroma-mc function's 1/8-pel domain. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
498 
/* apply one mpeg motion vector to the three components */
/* NOTE(review): the first signature line (function name, MpegEncContext *s)
 * was lost in extraction — this is the lowres per-macroblock MC routine for
 * luma + both chroma planes, handling frame/field prediction and all chroma
 * subsampling layouts. */
                                uint8_t *dest_y,
                                uint8_t *dest_cb,
                                uint8_t *dest_cr,
                                int field_based,
                                int bottom_field,
                                int field_select,
                                uint8_t *const *ref_picture,
                                const h264_chroma_mc_func *pix_op,
                                int motion_x, int motion_y,
                                int h, int mb_y)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres = s->avctx->lowres;
    const int op_index = lowres - 1 + s->chroma_x_shift;
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    /* Chroma row count: halved for 4:2:0, adjusted for the bottom field. */
    int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;

    av_assert2(op_index <= 3);

    linesize = s->cur_pic.linesize[0] << field_based;
    uvlinesize = s->cur_pic.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* >> lowres + 1 parses as >> (lowres + 1): integer-pel position. */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if (s->chroma_y_shift) {
            /* 4:2:0: chroma MV is half the luma MV in both directions. */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvsx = mx & s_mask;
            uvsy = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if (s->chroma_x_shift) {
                //Chroma422
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* Out-of-picture reads go through the edge-emulation buffer. */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - FFMAX(h, hc<<s->chroma_y_shift), 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based), h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    /* Rescale sub-pel offsets to the chroma-mc function's 1/8-pel domain. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
641 
/* NOTE(review): the first signature line (function name, MpegEncContext *s,
 * and possibly a dest parameter) was lost in extraction — this performs
 * lowres chroma MC for 4MV (8x8) macroblocks using a single averaged chroma
 * vector. The two statements deriving mx/my from the summed luma MVs
 * (original lines 667-668, presumably the H.263 chroma rounding) are also
 * missing from this dump. */
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    const int lowres = s->avctx->lowres;
    const int op_index = lowres;
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    const uint8_t *ptr;

    av_assert2(op_index <= 3);

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr uses the same offset; reuse the emulation decision made for Cb. */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
700 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t *const *ref_picture,
                                     const h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y;
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* One vector for the whole macroblock. */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* Four luma vectors; chroma is compensated once with their sum. */
        mx = 0;
        my = 0;
        for (int i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* Field picture referencing the opposite parity of the frame
             * currently being decoded: read from the current picture. */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->cur_pic.ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* Two vectors, one per 16x8 half of the macroblock. */
        for (int i = 0; i < 2; i++) {
            uint8_t *const *ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->cur_pic.ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* Dual-prime: predictions from both parities are averaged. */
        if (s->picture_structure == PICT_FRAME) {
            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (int i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->cur_pic.ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
840 
/**
 * find the lowest MB row referenced in the MVs
 */
/* NOTE(review): the signature line was lost in extraction — presumably
 * static int lowest_referenced_row(MpegEncContext *s, int dir). Used to
 * decide how far the reference frame must be decoded before this MB's MC
 * can proceed under frame threading. */
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int off, mvs;

    /* Field pictures and global MC reference in patterns this simple scan
     * does not model — fall back to "whole frame". */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (int i = 0; i < mvs; i++) {
        int my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* Convert the largest vertical displacement (half- or quarter-pel) into
     * a macroblock-row offset, rounding up for MC filter overread. */
    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
878 
879 /* add block[] to dest[] */
880 static inline void add_dct(MpegEncContext *s,
881  int16_t *block, int i, uint8_t *dest, int line_size)
882 {
883  if (s->block_last_index[i] >= 0) {
884  s->idsp.idct_add(dest, line_size, block);
885  }
886 }
887 
888 /* put block[] to dest[] */
889 static inline void put_dct(MpegEncContext *s,
890  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
891 {
892  s->dct_unquantize_intra(s, block, i, qscale);
893  s->idsp.idct_put(dest, line_size, block);
894 }
895 
896 static inline void add_dequant_dct(MpegEncContext *s,
897  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
898 {
899  if (s->block_last_index[i] >= 0) {
900  s->dct_unquantize_inter(s, block, i, qscale);
901 
902  s->idsp.idct_add(dest, line_size, block);
903  }
904 }
905 
#define NOT_MPEG12_H261 0
#define MAY_BE_MPEG12_H261 1
#define DEFINITELY_MPEG12_H261 2

/* generic function called after a macroblock has been parsed by the decoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
/* NOTE(review): the line carrying the function name and leading parameters
 * (original 920) was lost in extraction; only the trailing template-style
 * flags remain below. The second argument of each ff_thread_progress_await
 * call (original lines 939/943 — the awaited row) is also missing. */
                                   int lowres_flag, int is_mpeg12)
{
#define IS_MPEG12_H261(s) (is_mpeg12 == MAY_BE_MPEG12_H261 ? ((s)->out_format <= FMT_H261) : is_mpeg12)
    uint8_t *dest_y = s->dest[0], *dest_cb = s->dest[1], *dest_cr = s->dest[2];
    int dct_linesize, dct_offset;
    const int linesize = s->cur_pic.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->cur_pic.linesize[1];
    const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;

    dct_linesize = linesize << s->interlaced_dct;
    dct_offset = s->interlaced_dct ? linesize : linesize * block_size;

    if (!s->mb_intra) {
        /* motion handling */
        if (HAVE_THREADS && is_mpeg12 != DEFINITELY_MPEG12_H261 &&
            s->avctx->active_thread_type & FF_THREAD_FRAME) {
            /* Wait until the referenced rows of the reference frames have
             * been decoded by the other thread. */
            if (s->mv_dir & MV_DIR_FORWARD) {
                ff_thread_progress_await(&s->last_pic.ptr->progress,
            }
            if (s->mv_dir & MV_DIR_BACKWARD) {
                ff_thread_progress_await(&s->next_pic.ptr->progress,
            }
        }

        if (lowres_flag) {
            const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

            if (s->mv_dir & MV_DIR_FORWARD) {
                MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix);
                /* Second (backward) prediction averages onto the first. */
                op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
            if (s->mv_dir & MV_DIR_BACKWARD) {
                MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix);
            }
        } else {
            const op_pixels_func (*op_pix)[4];
            const qpel_mc_func (*op_qpix)[16];

            if ((is_mpeg12 == DEFINITELY_MPEG12_H261 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
                op_pix = s->hdsp.put_pixels_tab;
                op_qpix = s->qdsp.put_qpel_pixels_tab;
            } else {
                op_pix = s->hdsp.put_no_rnd_pixels_tab;
                op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
            }
            if (s->mv_dir & MV_DIR_FORWARD) {
                ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_pic.data, op_pix, op_qpix);
                op_pix = s->hdsp.avg_pixels_tab;
                op_qpix = s->qdsp.avg_qpel_pixels_tab;
            }
            if (s->mv_dir & MV_DIR_BACKWARD) {
                ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix, op_qpix);
            }
        }

        /* skip dequant / idct if we are really late ;) */
        if (s->avctx->skip_idct) {
            if (  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                || s->avctx->skip_idct >= AVDISCARD_ALL)
                return;
        }

        /* add dct residue */
        if (!(IS_MPEG12_H261(s) || s->msmpeg4_version != MSMP4_UNUSED ||
              (s->codec_id == AV_CODEC_ID_MPEG4 && !s->mpeg_quant))) {
            /* Codecs whose inter blocks still need dequantization here. */
            add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
            add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                av_assert2(s->chroma_y_shift);
                add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
            }
        } else if (is_mpeg12 == DEFINITELY_MPEG12_H261 || lowres_flag || (s->codec_id != AV_CODEC_ID_WMV2)) {
            /* Residue is already dequantized: straight idct_add. */
            add_dct(s, block[0], 0, dest_y , dct_linesize);
            add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
            add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
            add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->chroma_y_shift) {//Chroma420
                    add_dct(s, block[4], 4, dest_cb, uvlinesize);
                    add_dct(s, block[5], 5, dest_cr, uvlinesize);
                } else {
                    //chroma422
                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                    add_dct(s, block[4], 4, dest_cb, dct_linesize);
                    add_dct(s, block[5], 5, dest_cr, dct_linesize);
                    add_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize);
                    add_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize);
                    if (!s->chroma_x_shift) {//Chroma444
                        add_dct(s, block[8], 8, dest_cb + block_size, dct_linesize);
                        add_dct(s, block[9], 9, dest_cr + block_size, dct_linesize);
                        add_dct(s, block[10], 10, dest_cb + block_size + dct_offset, dct_linesize);
                        add_dct(s, block[11], 11, dest_cr + block_size + dct_offset, dct_linesize);
                    }
                }
            } //fi gray
        } else if (CONFIG_WMV2_DECODER) {
            ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
        }
    } else {
        /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
           TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
        if (is_mpeg12 != DEFINITELY_MPEG12_H261 && CONFIG_MPEG4_DECODER &&
            /* s->codec_id == AV_CODEC_ID_MPEG4 && */
            s->avctx->bits_per_raw_sample > 8) {
            ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
                                   uvlinesize, dct_linesize, dct_offset);
        } else if (!IS_MPEG12_H261(s)) {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
            put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                } else {
                    dct_offset >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        } else {
            /* MPEG-1/2 & H.261 intra blocks arrive dequantized: plain put. */
            s->idsp.idct_put(dest_y, dct_linesize, block[0]);
            s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
            s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
            s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->chroma_y_shift) {
                    s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                    s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                } else {
                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                    s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
                    s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
                    s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                    s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                    if (!s->chroma_x_shift) { //Chroma444
                        s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                        s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                        s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                        s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                    }
                }
            } //gray
        }
    }
}
1086 
/* NOTE(review): the signature line (original 1087) was lost in extraction —
 * this is the public per-macroblock reconstruction entry point. The four
 * dispatch calls into mpv_reconstruct_mb_internal (original lines 1121,
 * 1123, 1125, 1128 — selecting the MPEG-1/2-H.261 / generic / lowres
 * template instantiations) are also missing from this dump, leaving the
 * final if/else bodies empty. */
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

    s->cur_pic.qscale_table[mb_xy] = s->qscale;

    /* avoid copy if macroblock skipped in last frame too */
    if (s->mb_skipped) {
        s->mb_skipped = 0;
        av_assert2(s->pict_type != AV_PICTURE_TYPE_I);
        *mbskip_ptr = 1;
    } else if (!s->cur_pic.reference) {
        *mbskip_ptr = 1;
    } else{
        *mbskip_ptr = 0; /* not skipped */
    }

    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                /* Print in natural scan order via the IDCT permutation. */
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    av_assert2((s->out_format <= FMT_H261) == (s->out_format == FMT_H261 || s->out_format == FMT_MPEG1));
    if (!s->avctx->lowres) {
#if !CONFIG_SMALL
        if (s->out_format <= FMT_H261)
        else
#else
#endif
    } else
}
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:422
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:54
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1408
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:192
MpegEncContext::progressive_sequence
int progressive_sequence
Definition: mpegvideo.h:293
av_clip
#define av_clip
Definition: common.h:100
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1330
ff_mpv_decode_init
av_cold int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:47
threadprogress.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:820
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:140
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:194
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:1012
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:889
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
MpegEncContext::workaround_bugs
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:102
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:444
AVFrame::width
int width
Definition: frame.h:482
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
b
#define b
Definition: input.c:42
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:217
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:85
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:82
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
MAY_BE_MPEG12_H261
#define MAY_BE_MPEG12_H261
Definition: mpegvideo_dec.c:907
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:55
MpegEncContext::height
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:91
mpegutils.h
thread.h
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:189
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:196
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo_dec.c:1087
MPVPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:68
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:54
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
fail
#define fail()
Definition: checkasm.h:193
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:264
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:114
MPVPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:65
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3369
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_dec.c:451
MpegEncContext::width
int width
Definition: mpegvideo.h:91
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:610
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
ff_mpv_init_context_frame
av_cold int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:252
MPVPicture::dummy
int dummy
Picture is a dummy and should not be output.
Definition: mpegpicture.h:81
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:60
avassert.h
mpegvideodec.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
emms_c
#define emms_c()
Definition: emms.h:63
ff_mpeg_flush
av_cold void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:431
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2157
s
#define s(width, name)
Definition: cbs_vp9.c:198
MpegEncContext::last_time_base
int last_time_base
Definition: mpegvideo.h:250
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::h_edge_pos
int h_edge_pos
Definition: mpegvideo.h:112
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
decode.h
limits.h
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:278
AV_CODEC_ID_VC1IMAGE
@ AV_CODEC_ID_VC1IMAGE
Definition: codec_id.h:204
MpegEncContext::cur_pic
MPVWorkPicture cur_pic
copy of the current picture structure.
Definition: mpegvideo.h:144
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:56
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:509
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
MpegEncContext::low_delay
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:262
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
MpegEncContext::picture_pool
struct AVRefStructPool * picture_pool
Pool for MPVPictures.
Definition: mpegvideo.h:116
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:203
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:64
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:392
NULL
#define NULL
Definition: coverity.c:32
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:211
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:95
MpegEncContext::next_pic
MPVWorkPicture next_pic
copy of the next picture structure.
Definition: mpegvideo.h:138
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:230
ff_mpv_decode_close
av_cold int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:145
DEFINITELY_MPEG12_H261
#define DEFINITELY_MPEG12_H261
Definition: mpegvideo_dec.c:908
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1345
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
IS_MPEG12_H261
#define IS_MPEG12_H261(s)
lowres
static int lowres
Definition: ffplay.c:330
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
alloc_dummy_frame
static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVWorkPicture *dst)
Definition: mpegvideo_dec.c:250
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
MpegEncContext::private_ctx
void * private_ctx
Definition: mpegvideo.h:89
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:193
ff_mpv_alloc_dummy_frames
int ff_mpv_alloc_dummy_frames(MpegEncContext *s)
Ensure that the dummy frames are allocated according to pict_type if necessary.
Definition: mpegvideo_dec.c:283
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
MpegEncContext::pb_field_time
uint16_t pb_field_time
like above, just for interlaced
Definition: mpegvideo.h:257
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo_dec.c:880
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1383
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:220
f
f
Definition: af_crystalizer.c:122
MPVPicture::mb_stride
int mb_stride
mb_stride of the tables
Definition: mpegpicture.h:79
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, const uint32_t *mbtype_table, const int8_t *qscale_table, int16_t(*const motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:155
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:166
height
#define height
Definition: dsp.h:85
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: h263.h:30
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:112
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
h264chroma.h
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:423
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:259
MpegEncContext::context_initialized
int context_initialized
Definition: mpegvideo.h:107
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:326
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:195
FF_THREAD_NO_FRAME_THREADING
@ FF_THREAD_NO_FRAME_THREADING
Definition: thread.h:63
color_frame
static void color_frame(AVFrame *frame, int luma)
Definition: mpegvideo_dec.c:265
MPVPicture::mb_width
int mb_width
mb_width of the tables
Definition: mpegpicture.h:77
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo_dec.c:844
AV_CODEC_ID_MSS2
@ AV_CODEC_ID_MSS2
Definition: codec_id.h:221
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1575
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
emms.h
MPVPicture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: mpegpicture.h:75
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: get_buffer.c:253
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:385
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo_dec.c:920
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:115
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
MPVPicture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:62
internal.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo_dec.c:500
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:211
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
MpegEncContext::last_pic
MPVWorkPicture last_pic
copy of the previous picture structure.
Definition: mpegvideo.h:132
MPVPicture::mb_height
int mb_height
mb_height of the tables
Definition: mpegpicture.h:78
AVCodecContext::height
int height
Definition: avcodec.h:595
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:634
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo_dec.c:712
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:108
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1392
avcodec.h
ff_mpv_workpic_from_pic
void ff_mpv_workpic_from_pic(MPVWorkPicture *wpic, MPVPicture *pic)
Definition: mpegpicture.c:128
stride
#define stride
Definition: h264pred_template.c:536
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo_dec.c:642
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1815
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:395
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:78
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:254
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:482
alloc_picture
static int alloc_picture(MpegEncContext *s, MPVWorkPicture *dst, int reference)
Definition: mpegvideo_dec.c:198
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_dec.c:896
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:377
NOT_MPEG12_H261
#define NOT_MPEG12_H261
Definition: mpegvideo_dec.c:906
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:610
MPVWorkPicture
Definition: mpegpicture.h:95
ThreadingStatus
ThreadingStatus
Definition: thread.h:60
MPVPicture::progress
ThreadProgress progress
Definition: mpegpicture.h:92
MpegEncContext::first_field
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:318
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:188
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:595
imgutils.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mpv_free_context_frame
av_cold void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:484
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2070
AV_CODEC_ID_WMV3IMAGE
@ AV_CODEC_ID_WMV3IMAGE
Definition: codec_id.h:203
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:217
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:64
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
video_enc_params.h
ff_mpv_common_frame_size_change
av_cold int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:154
h263.h