/*
 * FFmpeg — h264_slice.c (H.264 slice decoding).
 * NOTE(review): this text was a Doxygen listing header; converted to a
 * comment so the file reads coherently.
 */
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "config_components.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/timecode.h"
34 #include "decode.h"
35 #include "cabac.h"
36 #include "cabac_functions.h"
37 #include "error_resilience.h"
38 #include "avcodec.h"
39 #include "h264.h"
40 #include "h264dec.h"
41 #include "h264data.h"
42 #include "h264chroma.h"
43 #include "h264_ps.h"
44 #include "golomb.h"
45 #include "mathops.h"
46 #include "mpegutils.h"
47 #include "rectangle.h"
48 #include "refstruct.h"
49 #include "thread.h"
50 #include "threadframe.h"
51 
/* 4x4 residual coefficient scan order for field (interlaced) macroblocks,
 * written as x + y*4 block offsets.
 * NOTE(review): declared [16+1] with only 16 initializers — presumably one
 * zero pad element for readers that may fetch one entry past the end;
 * confirm against the scan-table consumers. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
58 
/* 8x8 residual coefficient scan order for field macroblocks (CABAC),
 * written as x + y*8 block offsets; one zero pad element at the end. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
77 
/* 8x8 field scan reordered for CAVLC decoding (coefficients are interleaved
 * in groups of four runs, mirroring zigzag_scan8x8_cavlc below);
 * one zero pad element at the end. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
96 
// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
/* 8x8 zigzag scan reordered for CAVLC decoding per the identity above;
 * one zero pad element at the end, matching the other scan tables. */
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
116 
117 static void release_unused_pictures(H264Context *h, int remove_current)
118 {
119  int i;
120 
121  /* release non reference frames */
122  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
123  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
124  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
125  ff_h264_unref_picture(&h->DPB[i]);
126  }
127  }
128 }
129 
130 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
131 {
132  const H264Context *h = sl->h264;
133  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
134 
135  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
136  // edge emu needs blocksize + filter length - 1
137  // (= 21x21 for H.264)
138  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
139 
141  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
143  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
144 
145  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
146  !sl->top_borders[0] || !sl->top_borders[1]) {
149  av_freep(&sl->top_borders[0]);
150  av_freep(&sl->top_borders[1]);
151 
154  sl->top_borders_allocated[0] = 0;
155  sl->top_borders_allocated[1] = 0;
156  return AVERROR(ENOMEM);
157  }
158 
159  return 0;
160 }
161 
163 {
164  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
165  const int mb_array_size = h->mb_stride * h->mb_height;
166  const int b4_stride = h->mb_width * 4 + 1;
167  const int b4_array_size = b4_stride * h->mb_height * 4;
168 
169  h->qscale_table_pool = ff_refstruct_pool_alloc(big_mb_num + h->mb_stride, 0);
170  h->mb_type_pool = ff_refstruct_pool_alloc((big_mb_num + h->mb_stride) *
171  sizeof(uint32_t), 0);
172  h->motion_val_pool = ff_refstruct_pool_alloc(2 * (b4_array_size + 4) *
173  sizeof(int16_t), 0);
174  h->ref_index_pool = ff_refstruct_pool_alloc(4 * mb_array_size, 0);
175 
176  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
177  !h->ref_index_pool) {
178  ff_refstruct_pool_uninit(&h->qscale_table_pool);
179  ff_refstruct_pool_uninit(&h->mb_type_pool);
180  ff_refstruct_pool_uninit(&h->motion_val_pool);
181  ff_refstruct_pool_uninit(&h->ref_index_pool);
182  return AVERROR(ENOMEM);
183  }
184 
185  return 0;
186 }
187 
189 {
190  int i, ret = 0;
191 
192  av_assert0(!pic->f->data[0]);
193 
194  if (h->sei.common.lcevc.info) {
195  HEVCSEILCEVC *lcevc = &h->sei.common.lcevc;
197  if (ret < 0)
198  return ret;
199  }
200 
201  pic->tf.f = pic->f;
202  ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
203  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
204  if (ret < 0)
205  goto fail;
206 
207  if (pic->needs_fg) {
208  pic->f_grain->format = pic->f->format;
209  pic->f_grain->width = pic->f->width;
210  pic->f_grain->height = pic->f->height;
211  ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
212  if (ret < 0)
213  goto fail;
214  }
215 
217  if (ret < 0)
218  goto fail;
219 
220  if (h->decode_error_flags_pool) {
221  pic->decode_error_flags = ff_refstruct_pool_get(h->decode_error_flags_pool);
222  if (!pic->decode_error_flags)
223  goto fail;
225  }
226 
227  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
228  int h_chroma_shift, v_chroma_shift;
230  &h_chroma_shift, &v_chroma_shift);
231 
232  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
233  memset(pic->f->data[1] + pic->f->linesize[1]*i,
234  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
235  memset(pic->f->data[2] + pic->f->linesize[2]*i,
236  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
237  }
238  }
239 
240  if (!h->qscale_table_pool) {
242  if (ret < 0)
243  goto fail;
244  }
245 
246  pic->qscale_table_base = ff_refstruct_pool_get(h->qscale_table_pool);
247  pic->mb_type_base = ff_refstruct_pool_get(h->mb_type_pool);
248  if (!pic->qscale_table_base || !pic->mb_type_base)
249  goto fail;
250 
251  pic->mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
252  pic->qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
253 
254  for (i = 0; i < 2; i++) {
255  pic->motion_val_base[i] = ff_refstruct_pool_get(h->motion_val_pool);
256  pic->ref_index[i] = ff_refstruct_pool_get(h->ref_index_pool);
257  if (!pic->motion_val_base[i] || !pic->ref_index[i])
258  goto fail;
259 
260  pic->motion_val[i] = pic->motion_val_base[i] + 4;
261  }
262 
263  pic->pps = ff_refstruct_ref_c(h->ps.pps);
264 
265  pic->mb_width = h->mb_width;
266  pic->mb_height = h->mb_height;
267  pic->mb_stride = h->mb_stride;
268 
269  return 0;
270 fail:
272  return (ret < 0) ? ret : AVERROR(ENOMEM);
273 }
274 
276 {
277  int i;
278 
279  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
280  if (!h->DPB[i].f->buf[0])
281  return i;
282  }
283  return AVERROR_INVALIDDATA;
284 }
285 
286 
/* True if pointer a lies within the size bytes starting at b (used only for
 * assertion checks when rebasing pictures between thread contexts). */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Translate a picture pointer that points into old_ctx->DPB into the
 * corresponding slot of new_ctx->DPB; any pointer outside the old DPB
 * (including NULL) maps to NULL. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
    (((pic) && (pic) >= (old_ctx)->DPB && \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
293 
294 static void copy_picture_range(H264Picture **to, H264Picture *const *from, int count,
295  H264Context *new_base, const H264Context *old_base)
296 {
297  int i;
298 
299  for (i = 0; i < count; i++) {
300  av_assert1(!from[i] ||
301  IN_RANGE(from[i], old_base, 1) ||
302  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
303  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
304  }
305 }
306 
307 static void color_frame(AVFrame *frame, const int c[4])
308 {
310 
312 
313  for (int p = 0; p < desc->nb_components; p++) {
314  uint8_t *dst = frame->data[p];
315  int is_chroma = p == 1 || p == 2;
316  int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
317  int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
318  if (desc->comp[0].depth >= 9) {
319  ((uint16_t*)dst)[0] = c[p];
320  av_memcpy_backptr(dst + 2, 2, bytes - 2);
321  dst += frame->linesize[p];
322  for (int y = 1; y < height; y++) {
323  memcpy(dst, frame->data[p], 2*bytes);
324  dst += frame->linesize[p];
325  }
326  } else {
327  for (int y = 0; y < height; y++) {
328  memset(dst, c[p], bytes);
329  dst += frame->linesize[p];
330  }
331  }
332  }
333 }
334 
336 
338  const AVCodecContext *src)
339 {
340  H264Context *h = dst->priv_data, *h1 = src->priv_data;
341  int inited = h->context_initialized, err = 0;
342  int need_reinit = 0;
343  int i, ret;
344 
345  if (dst == src)
346  return 0;
347 
348  if (inited && !h1->ps.sps)
349  return AVERROR_INVALIDDATA;
350 
351  if (inited &&
352  (h->width != h1->width ||
353  h->height != h1->height ||
354  h->mb_width != h1->mb_width ||
355  h->mb_height != h1->mb_height ||
356  !h->ps.sps ||
357  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
358  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
359  h->ps.sps->vui.matrix_coeffs != h1->ps.sps->vui.matrix_coeffs)) {
360  need_reinit = 1;
361  }
362 
363  /* copy block_offset since frame_start may not be called */
364  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
365 
366  // SPS/PPS
367  for (int i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++)
368  ff_refstruct_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
369  for (int i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++)
370  ff_refstruct_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
371 
372  ff_refstruct_replace(&h->ps.pps, h1->ps.pps);
373  h->ps.sps = h1->ps.sps;
374 
375  if (need_reinit || !inited) {
376  h->width = h1->width;
377  h->height = h1->height;
378  h->mb_height = h1->mb_height;
379  h->mb_width = h1->mb_width;
380  h->mb_num = h1->mb_num;
381  h->mb_stride = h1->mb_stride;
382  h->b_stride = h1->b_stride;
383  h->x264_build = h1->x264_build;
384 
385  if (h->context_initialized || h1->context_initialized) {
386  if ((err = h264_slice_header_init(h)) < 0) {
387  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
388  return err;
389  }
390  }
391 
392  /* copy block_offset since frame_start may not be called */
393  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
394  }
395 
396  h->width_from_caller = h1->width_from_caller;
397  h->height_from_caller = h1->height_from_caller;
398  h->first_field = h1->first_field;
399  h->picture_structure = h1->picture_structure;
400  h->mb_aff_frame = h1->mb_aff_frame;
401  h->droppable = h1->droppable;
402 
403  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
404  ret = ff_h264_replace_picture(&h->DPB[i], &h1->DPB[i]);
405  if (ret < 0)
406  return ret;
407  }
408 
409  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
410  ret = ff_h264_replace_picture(&h->cur_pic, &h1->cur_pic);
411  if (ret < 0)
412  return ret;
413 
414  h->enable_er = h1->enable_er;
415  h->workaround_bugs = h1->workaround_bugs;
416  h->droppable = h1->droppable;
417 
418  // extradata/NAL handling
419  h->is_avc = h1->is_avc;
420  h->nal_length_size = h1->nal_length_size;
421 
422  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
423 
424  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
425  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
426  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
427  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
428 
429  h->next_output_pic = h1->next_output_pic;
430  h->next_outputed_poc = h1->next_outputed_poc;
431  h->poc_offset = h1->poc_offset;
432 
433  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
434  h->nb_mmco = h1->nb_mmco;
435  h->mmco_reset = h1->mmco_reset;
436  h->explicit_ref_marking = h1->explicit_ref_marking;
437  h->long_ref_count = h1->long_ref_count;
438  h->short_ref_count = h1->short_ref_count;
439 
440  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
441  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
442  copy_picture_range(h->delayed_pic, h1->delayed_pic,
443  FF_ARRAY_ELEMS(h->delayed_pic), h, h1);
444 
445  h->frame_recovered = h1->frame_recovered;
446 
447  ret = ff_h264_sei_ctx_replace(&h->sei, &h1->sei);
448  if (ret < 0)
449  return ret;
450 
451  h->sei.common.unregistered.x264_build = h1->sei.common.unregistered.x264_build;
452  h->sei.common.mastering_display = h1->sei.common.mastering_display;
453  h->sei.common.content_light = h1->sei.common.content_light;
454 
455  if (!h->cur_pic_ptr)
456  return 0;
457 
458  if (!h->droppable) {
460  h->poc.prev_poc_msb = h->poc.poc_msb;
461  h->poc.prev_poc_lsb = h->poc.poc_lsb;
462  }
463  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
464  h->poc.prev_frame_num = h->poc.frame_num;
465 
466  h->recovery_frame = h1->recovery_frame;
467  h->non_gray = h1->non_gray;
468 
469  return err;
470 }
471 
473  const AVCodecContext *src)
474 {
475  H264Context *h = dst->priv_data;
476  const H264Context *h1 = src->priv_data;
477 
478  h->is_avc = h1->is_avc;
479  h->nal_length_size = h1->nal_length_size;
480 
481  return 0;
482 }
483 
485 {
486  H264Picture *pic;
487  int i, ret;
488  const int pixel_shift = h->pixel_shift;
489 
490  if (!ff_thread_can_start_frame(h->avctx)) {
491  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
492  return AVERROR_BUG;
493  }
494 
496  h->cur_pic_ptr = NULL;
497 
499  if (i < 0) {
500  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
501  return i;
502  }
503  pic = &h->DPB[i];
504 
505  pic->reference = h->droppable ? 0 : h->picture_structure;
506  pic->field_picture = h->picture_structure != PICT_FRAME;
507  pic->frame_num = h->poc.frame_num;
508  /*
509  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
510  * in later.
511  * See decode_nal_units().
512  */
513  pic->f->flags &= ~AV_FRAME_FLAG_KEY;
514  pic->mmco_reset = 0;
515  pic->recovered = 0;
516  pic->invalid_gap = 0;
517  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
518 
519  pic->f->pict_type = h->slice_ctx[0].slice_type;
520 
521  pic->f->crop_left = h->crop_left;
522  pic->f->crop_right = h->crop_right;
523  pic->f->crop_top = h->crop_top;
524  pic->f->crop_bottom = h->crop_bottom;
525 
526  pic->needs_fg =
527  h->sei.common.film_grain_characteristics &&
528  h->sei.common.film_grain_characteristics->present &&
529  !h->avctx->hwaccel &&
530  !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
531 
532  if ((ret = alloc_picture(h, pic)) < 0)
533  return ret;
534 
535  h->cur_pic_ptr = pic;
536  ff_h264_unref_picture(&h->cur_pic);
537  if (CONFIG_ERROR_RESILIENCE) {
538  ff_h264_set_erpic(&h->er.cur_pic, NULL);
539  }
540 
541  if ((ret = ff_h264_ref_picture(&h->cur_pic, h->cur_pic_ptr)) < 0)
542  return ret;
543 
544  for (i = 0; i < h->nb_slice_ctx; i++) {
545  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
546  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
547  }
548 
549  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
550  ff_er_frame_start(&h->er);
551  ff_h264_set_erpic(&h->er.last_pic, NULL);
552  ff_h264_set_erpic(&h->er.next_pic, NULL);
553  }
554 
555  for (i = 0; i < 16; i++) {
556  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
557  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
558  }
559  for (i = 0; i < 16; i++) {
560  h->block_offset[16 + i] =
561  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
562  h->block_offset[48 + 16 + i] =
563  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
564  }
565 
566  /* We mark the current picture as non-reference after allocating it, so
567  * that if we break out due to an error it can be released automatically
568  * in the next ff_mpv_frame_start().
569  */
570  h->cur_pic_ptr->reference = 0;
571 
572  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
573 
574  h->next_output_pic = NULL;
575 
576  h->postpone_filter = 0;
577 
578  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
579 
580  if (h->sei.common.unregistered.x264_build >= 0)
581  h->x264_build = h->sei.common.unregistered.x264_build;
582 
583  assert(h->cur_pic_ptr->long_ref == 0);
584 
585  return 0;
586 }
587 
589  const uint8_t *src_y,
590  const uint8_t *src_cb, const uint8_t *src_cr,
591  int linesize, int uvlinesize,
592  int simple)
593 {
594  uint8_t *top_border;
595  int top_idx = 1;
596  const int pixel_shift = h->pixel_shift;
597  int chroma444 = CHROMA444(h);
598  int chroma422 = CHROMA422(h);
599 
600  src_y -= linesize;
601  src_cb -= uvlinesize;
602  src_cr -= uvlinesize;
603 
604  if (!simple && FRAME_MBAFF(h)) {
605  if (sl->mb_y & 1) {
606  if (!MB_MBAFF(sl)) {
607  top_border = sl->top_borders[0][sl->mb_x];
608  AV_COPY128(top_border, src_y + 15 * linesize);
609  if (pixel_shift)
610  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
611  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
612  if (chroma444) {
613  if (pixel_shift) {
614  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
615  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
616  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
617  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
618  } else {
619  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
620  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
621  }
622  } else if (chroma422) {
623  if (pixel_shift) {
624  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
625  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
626  } else {
627  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
628  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
629  }
630  } else {
631  if (pixel_shift) {
632  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
633  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
634  } else {
635  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
636  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
637  }
638  }
639  }
640  }
641  } else if (MB_MBAFF(sl)) {
642  top_idx = 0;
643  } else
644  return;
645  }
646 
647  top_border = sl->top_borders[top_idx][sl->mb_x];
648  /* There are two lines saved, the line above the top macroblock
649  * of a pair, and the line above the bottom macroblock. */
650  AV_COPY128(top_border, src_y + 16 * linesize);
651  if (pixel_shift)
652  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
653 
654  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
655  if (chroma444) {
656  if (pixel_shift) {
657  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
658  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
659  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
660  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
661  } else {
662  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
663  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
664  }
665  } else if (chroma422) {
666  if (pixel_shift) {
667  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
668  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
669  } else {
670  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
671  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
672  }
673  } else {
674  if (pixel_shift) {
675  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
676  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
677  } else {
678  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
679  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
680  }
681  }
682  }
683 }
684 
685 /**
686  * Initialize implicit_weight table.
687  * @param field 0/1 initialize the weight for interlaced MBAFF
688  * -1 initializes the rest
689  */
691 {
692  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
693 
694  for (i = 0; i < 2; i++) {
695  sl->pwt.luma_weight_flag[i] = 0;
696  sl->pwt.chroma_weight_flag[i] = 0;
697  }
698 
699  if (field < 0) {
700  if (h->picture_structure == PICT_FRAME) {
701  cur_poc = h->cur_pic_ptr->poc;
702  } else {
703  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
704  }
705  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
706  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
707  sl->pwt.use_weight = 0;
708  sl->pwt.use_weight_chroma = 0;
709  return;
710  }
711  ref_start = 0;
712  ref_count0 = sl->ref_count[0];
713  ref_count1 = sl->ref_count[1];
714  } else {
715  cur_poc = h->cur_pic_ptr->field_poc[field];
716  ref_start = 16;
717  ref_count0 = 16 + 2 * sl->ref_count[0];
718  ref_count1 = 16 + 2 * sl->ref_count[1];
719  }
720 
721  sl->pwt.use_weight = 2;
722  sl->pwt.use_weight_chroma = 2;
723  sl->pwt.luma_log2_weight_denom = 5;
725 
726  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
727  int64_t poc0 = sl->ref_list[0][ref0].poc;
728  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
729  int w = 32;
730  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
731  int poc1 = sl->ref_list[1][ref1].poc;
732  int td = av_clip_int8(poc1 - poc0);
733  if (td) {
734  int tb = av_clip_int8(cur_poc - poc0);
735  int tx = (16384 + (FFABS(td) >> 1)) / td;
736  int dist_scale_factor = (tb * tx + 32) >> 8;
737  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
738  w = 64 - dist_scale_factor;
739  }
740  }
741  if (field < 0) {
742  sl->pwt.implicit_weight[ref0][ref1][0] =
743  sl->pwt.implicit_weight[ref0][ref1][1] = w;
744  } else {
745  sl->pwt.implicit_weight[ref0][ref1][field] = w;
746  }
747  }
748  }
749 }
750 
751 /**
752  * initialize scan tables
753  */
755 {
756  int i;
757  for (i = 0; i < 16; i++) {
758 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
759  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
760  h->field_scan[i] = TRANSPOSE(field_scan[i]);
761 #undef TRANSPOSE
762  }
763  for (i = 0; i < 64; i++) {
764 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
765  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
766  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
767  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
768  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
769 #undef TRANSPOSE
770  }
771  if (h->ps.sps->transform_bypass) { // FIXME same ugly
772  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
773  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
774  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
775  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
776  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
777  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
778  } else {
779  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
780  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
781  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
782  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
783  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
784  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
785  }
786 }
787 
/* Build the candidate pixel-format list for the current SPS (bit depth and
 * chroma sampling), hwaccel formats first, and negotiate the final format.
 * If the currently selected format is already in the list and
 * force_callback is 0, it is kept without re-invoking the get_format
 * callback.  Returns the chosen format or AVERROR_INVALIDDATA for
 * unsupported bit depths. */
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
{
/* Number of hwaccel entries that can be appended below (D3D11VA can add
 * two formats, hence the *2). */
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
                     CONFIG_H264_D3D12VA_HWACCEL + \
                     CONFIG_H264_NVDEC_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_H264_VDPAU_HWACCEL + \
                     CONFIG_H264_VULKAN_HWACCEL)
    /* +2: one software format plus the AV_PIX_FMT_NONE terminator. */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;

    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else {
#if CONFIG_H264_VAAPI_HWACCEL
            // Just add as candidate. Whether VAProfileH264High10 usable or
            // not is decided by vaapi_decode_make_config() defined in FFmpeg
            // and vaQueryCodingProfile() defined in libva.
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            *fmt++ = AV_PIX_FMT_YUV420P10;
        }
        break;
    case 12:
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP12;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P12;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P12;
        else
            *fmt++ = AV_PIX_FMT_YUV420P12;
        break;
    case 14:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP14;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P14;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P14;
        else
            *fmt++ = AV_PIX_FMT_YUV420P14;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
#if CONFIG_H264_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_H264_D3D12VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D12;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    /* Keep the already-negotiated format when possible. */
    for (int i = 0; pix_fmts[i] != AV_PIX_FMT_NONE; i++)
        if (pix_fmts[i] == h->avctx->pix_fmt && !force_callback)
            return pix_fmts[i];
    return ff_get_format(h->avctx, pix_fmts);
}
921 
922 /* export coded and cropped frame dimensions to AVCodecContext */
924 {
925  const SPS *sps = h->ps.sps;
926  int cr = sps->crop_right;
927  int cl = sps->crop_left;
928  int ct = sps->crop_top;
929  int cb = sps->crop_bottom;
930  int width = h->width - (cr + cl);
931  int height = h->height - (ct + cb);
932  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
933  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
934 
935  /* handle container cropping */
936  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
937  !sps->crop_top && !sps->crop_left &&
938  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
939  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
940  h->width_from_caller <= width &&
941  h->height_from_caller <= height) {
942  width = h->width_from_caller;
943  height = h->height_from_caller;
944  cl = 0;
945  ct = 0;
946  cr = h->width - width;
947  cb = h->height - height;
948  } else {
949  h->width_from_caller = 0;
950  h->height_from_caller = 0;
951  }
952 
953  h->avctx->coded_width = h->width;
954  h->avctx->coded_height = h->height;
955  h->avctx->width = width;
956  h->avctx->height = height;
957  h->crop_right = cr;
958  h->crop_left = cl;
959  h->crop_top = ct;
960  h->crop_bottom = cb;
961 }
962 
/* Initialize the decoder context from the active SPS: sample aspect ratio,
 * frame rate, bit depth, chroma format, and all DSP function tables, then
 * set up the per-slice contexts. Returns 0 on success, negative on error.
 * NOTE(review): the signature line (doxygen 963) was lost in extraction;
 * presumably "static int h264_slice_header_init(H264Context *h)". Several
 * interior statements (doxygen 969, 985, 990-991, 1002, 1032, 1040) are
 * also missing below — restore from upstream before relying on this text. */
964 {
965  const SPS *sps = h->ps.sps;
966  int i, ret;
967 
968  if (!sps) {
 /* NOTE(review): the error-code assignment (doxygen 969) was dropped here. */
970  goto fail;
971  }
972 
973  ff_set_sar(h->avctx, sps->vui.sar);
974  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
975  &h->chroma_x_shift, &h->chroma_y_shift);
976 
977  if (sps->timing_info_present_flag) {
978  int64_t den = sps->time_scale;
 /* Workaround: old x264 builds (< 44) wrote half the correct time_scale. */
979  if (h->x264_build < 44U)
980  den *= 2;
981  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
982  sps->num_units_in_tick * 2, den, 1 << 30);
983  }
984 
 /* NOTE(review): a call statement (doxygen 985) was dropped here. */
986 
987  h->first_field = 0;
988  h->prev_interlaced_frame = 1;
989 
 /* NOTE(review): the allocating call whose result is checked below
  * (doxygen 990-991) was dropped here. */
992  if (ret < 0) {
993  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
994  goto fail;
995  }
996 
 /* Only bit depths 8, 9, 10, 12 and 14 are supported (11 and 13 excluded). */
997  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
998  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
999  ) {
1000  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
1001  sps->bit_depth_luma);
 /* NOTE(review): the error-code assignment (doxygen 1002) was dropped here. */
1003  goto fail;
1004  }
1005 
1006  h->cur_bit_depth_luma =
1007  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
1008  h->cur_chroma_format_idc = sps->chroma_format_idc;
1009  h->pixel_shift = sps->bit_depth_luma > 8;
1010  h->chroma_format_idc = sps->chroma_format_idc;
1011  h->bit_depth_luma = sps->bit_depth_luma;
1012 
 /* (Re)initialize all bit-depth/chroma-format dependent DSP tables. */
1013  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
1014  sps->chroma_format_idc);
1015  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
1016  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
1017  ff_h264_pred_init(&h->hpc, AV_CODEC_ID_H264, sps->bit_depth_luma,
1018  sps->chroma_format_idc);
1019  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
1020 
1021  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
 /* Single slice context when slice threading is not in use. */
1022  ff_h264_slice_context_init(h, &h->slice_ctx[0]);
1023  } else {
 /* Slice threading: give each slice context its own slice of the shared
  * per-macroblock tables. */
1024  for (i = 0; i < h->nb_slice_ctx; i++) {
1025  H264SliceContext *sl = &h->slice_ctx[i];
1026 
1027  sl->h264 = h;
1028  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
1029  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
1030  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
1031 
 /* NOTE(review): the per-context init call (doxygen 1032) was dropped here. */
1033  }
1034  }
1035 
1036  h->context_initialized = 1;
1037 
1038  return 0;
1039 fail:
 /* NOTE(review): a cleanup call (doxygen 1040) was dropped here. */
1041  h->context_initialized = 0;
1042  return ret;
1043 }
1044 
/* NOTE(review): the signature line (doxygen 1045) was lost in extraction;
 * presumably "static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)".
 * The case labels (doxygen 1048-1050) were also dropped — presumably they map
 * the deprecated full-range YUVJ* formats to their limited-range YUV*
 * equivalents so pixel-format comparisons ignore range-only differences.
 * Restore from upstream; only the default branch is visible here. */
1046 {
1047  switch (a) {
1051  default:
1052  return a;
1053  }
1054 }
1055 
/* Activate the PPS referenced by the current slice (and the SPS it points
 * to), export stream properties to the AVCodecContext, and decide whether
 * the decoder context must be (re)initialized; performs the reinit —
 * pixel-format negotiation plus h264_slice_header_init() — when needed.
 * Returns 0 on success or a negative error code. */
1056 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1057 {
1058  const SPS *sps;
1059  int needs_reinit = 0, must_reinit, ret;
1060 
 /* Only the first slice of a picture may switch the active PPS. */
1061  if (first_slice)
1062  ff_refstruct_replace(&h->ps.pps, h->ps.pps_list[sl->pps_id]);
1063 
1064  if (h->ps.sps != h->ps.pps->sps) {
1065  h->ps.sps = h->ps.pps->sps;
1066 
 /* Geometry, bit depth or chroma format changed -> full reinit needed. */
1067  if (h->mb_width != h->ps.sps->mb_width ||
1068  h->mb_height != h->ps.sps->mb_height ||
1069  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1070  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1071  )
1072  needs_reinit = 1;
1073 
1074  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1075  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1076  needs_reinit = 1;
1077  }
1078  sps = h->ps.sps;
1079 
 /* Reinit is mandatory if the already-initialized context disagrees with
  * the new SPS in any dimension/depth/format property. */
1080  must_reinit = (h->context_initialized &&
1081  ( 16*sps->mb_width != h->avctx->coded_width
1082  || 16*sps->mb_height != h->avctx->coded_height
1083  || h->cur_bit_depth_luma != sps->bit_depth_luma
1084  || h->cur_chroma_format_idc != sps->chroma_format_idc
1085  || h->mb_width != sps->mb_width
1086  || h->mb_height != sps->mb_height
1087  ));
 /* Pixel format change is compared modulo the deprecated "J" (full-range)
  * aliases via non_j_pixfmt(). */
1088  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1089  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1090  must_reinit = 1;
1091 
1092  if (first_slice && av_cmp_q(sps->vui.sar, h->avctx->sample_aspect_ratio))
1093  must_reinit = 1;
1094 
1095  if (!h->setup_finished) {
1096  h->avctx->profile = ff_h264_get_profile(sps);
1097  h->avctx->level = sps->level_idc;
1098  h->avctx->refs = sps->ref_frame_count;
1099 
1100  h->mb_width = sps->mb_width;
1101  h->mb_height = sps->mb_height;
1102  h->mb_num = h->mb_width * h->mb_height;
 /* +1 column of padding macroblocks for edge handling. */
1103  h->mb_stride = h->mb_width + 1;
1104 
1105  h->b_stride = h->mb_width * 4;
1106 
1107  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1108 
1109  h->width = 16 * h->mb_width;
1110  h->height = 16 * h->mb_height;
1111 
1112  init_dimensions(h);
1113 
1114  if (sps->vui.video_signal_type_present_flag) {
1115  h->avctx->color_range = sps->vui.video_full_range_flag > 0 ? AVCOL_RANGE_JPEG
1116  : AVCOL_RANGE_MPEG;
1117  if (sps->vui.colour_description_present_flag) {
 /* A matrix-coefficients change invalidates the current context too. */
1118  if (h->avctx->colorspace != sps->vui.matrix_coeffs)
1119  needs_reinit = 1;
1120  h->avctx->color_primaries = sps->vui.colour_primaries;
1121  h->avctx->color_trc = sps->vui.transfer_characteristics;
1122  h->avctx->colorspace = sps->vui.matrix_coeffs;
1123  }
1124  }
1125 
 /* SEI "alternative transfer characteristics" overrides the VUI TRC when
  * present and valid. */
1126  if (h->sei.common.alternative_transfer.present &&
1127  av_color_transfer_name(h->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
1128  h->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1129  h->avctx->color_trc = h->sei.common.alternative_transfer.preferred_transfer_characteristics;
1130  }
1131  }
1132  h->avctx->chroma_sample_location = sps->vui.chroma_location;
1133 
1134  if (!h->context_initialized || must_reinit || needs_reinit) {
1135  int flush_changes = h->context_initialized;
1136  h->context_initialized = 0;
 /* A mid-picture parameter change is invalid: only the first slice
  * context may trigger a reinit. */
1137  if (sl != h->slice_ctx) {
1138  av_log(h->avctx, AV_LOG_ERROR,
1139  "changing width %d -> %d / height %d -> %d on "
1140  "slice %d\n",
1141  h->width, h->avctx->coded_width,
1142  h->height, h->avctx->coded_height,
1143  h->current_slice + 1);
1144  return AVERROR_INVALIDDATA;
1145  }
1146 
1147  av_assert1(first_slice);
1148 
1149  if (flush_changes)
 /* NOTE(review): the flush call under this if (doxygen 1150) was
  * dropped during extraction — restore from upstream. */
1151 
1152  if ((ret = get_pixel_format(h, 1)) < 0)
1153  return ret;
1154  h->avctx->pix_fmt = ret;
1155 
1156  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1157  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1158 
1159  if ((ret = h264_slice_header_init(h)) < 0) {
1160  av_log(h->avctx, AV_LOG_ERROR,
1161  "h264_slice_header_init() failed\n");
1162  return ret;
1163  }
1164  }
1165 
1166  return 0;
1167 }
1168 
/* Export per-frame properties to the output AVFrame: interlacing and
 * field-order flags, repeat_pict for telecine hints, SEI-derived side data
 * and SMPTE timecodes. Returns 0 on success or a negative error code.
 * NOTE(review): the signature line (doxygen 1169) was lost in extraction;
 * presumably "static int h264_export_frame_props(H264Context *h)". The
 * pic_struct switch below is also missing its case labels (doxygen 1197,
 * 1199-1200, 1203-1205, 1211-1212, 1218, 1221) — restore from upstream. */
1170 {
1171  const SPS *sps = h->ps.sps;
1172  H264Picture *cur = h->cur_pic_ptr;
1173  AVFrame *out = cur->f;
1174  int interlaced_frame = 0, top_field_first = 0;
1175  int ret;
1176 
1177  out->flags &= ~AV_FRAME_FLAG_INTERLACED;
1178  out->repeat_pict = 0;
1179 
1180  /* Signal interlacing information externally. */
1181  /* Prioritize picture timing SEI information over used
1182  * decoding process if it exists. */
1183  if (h->sei.picture_timing.present) {
1184  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1185  h->avctx);
1186  if (ret < 0) {
1187  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1188  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1189  return ret;
 /* Best-effort: drop the bad SEI and continue without it. */
1190  h->sei.picture_timing.present = 0;
1191  }
1192  }
1193 
1194  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1195  const H264SEIPictureTiming *pt = &h->sei.picture_timing;
 /* NOTE(review): case labels missing throughout this switch (see above);
  * the bodies are transcribed as-is. */
1196  switch (pt->pic_struct) {
1198  break;
1201  interlaced_frame = 1;
1202  break;
 /* NOTE(review): the if-condition paired with the else below (doxygen
  * 1205) was dropped here. */
1206  interlaced_frame = 1;
1207  else
1208  // try to flag soft telecine progressive
1209  interlaced_frame = !!h->prev_interlaced_frame;
1210  break;
1213  /* Signal the possibility of telecined film externally
1214  * (pic_struct 5,6). From these hints, let the applications
1215  * decide if they apply deinterlacing. */
1216  out->repeat_pict = 1;
1217  break;
1219  out->repeat_pict = 2;
1220  break;
1222  out->repeat_pict = 4;
1223  break;
1224  }
1225 
 /* ct_type bit 1 set means interlaced source for frame-level pic_structs. */
1226  if ((pt->ct_type & 3) &&
1227  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1228  interlaced_frame = ((pt->ct_type & (1 << 1)) != 0);
1229  } else {
1230  /* Derive interlacing flag from used decoding process. */
1231  interlaced_frame = !!FIELD_OR_MBAFF_PICTURE(h);
1232  }
1233  h->prev_interlaced_frame = interlaced_frame;
1234 
1235  if (cur->field_poc[0] != cur->field_poc[1]) {
1236  /* Derive top_field_first from field pocs. */
1237  top_field_first = (cur->field_poc[0] < cur->field_poc[1]);
1238  } else {
1239  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1240  /* Use picture timing SEI information. Even if it is a
1241  * information of a past frame, better than nothing. */
1242  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1243  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1244  top_field_first = 1;
1245  } else if (interlaced_frame) {
1246  /* Default to top field first when pic_struct_present_flag
1247  * is not set but interlaced frame detected */
1248  top_field_first = 1;
1249  } // else
1250  /* Most likely progressive */
1251  }
1252 
1253  out->flags |= (AV_FRAME_FLAG_INTERLACED * interlaced_frame) |
1254  (AV_FRAME_FLAG_TOP_FIELD_FIRST * top_field_first);
1255 
1256  ret = ff_h2645_sei_to_frame(out, &h->sei.common, AV_CODEC_ID_H264, h->avctx,
1257  &sps->vui, sps->bit_depth_luma, sps->bit_depth_chroma,
1258  cur->poc + (unsigned)(h->poc_offset << 5));
1259  if (ret < 0)
1260  return ret;
1261 
1262  if (h->sei.picture_timing.timecode_cnt > 0) {
1263  uint32_t *tc_sd;
1264  char tcbuf[AV_TIMECODE_STR_SIZE];
1265  AVFrameSideData *tcside;
 /* NOTE(review): the side-data creation call (doxygen 1266) was dropped
  * here; the orphaned argument line below belongs to it. */
1267  sizeof(uint32_t)*4, &tcside);
1268  if (ret < 0)
1269  return ret;
1270 
1271  if (tcside) {
 /* Side data layout: tc_sd[0] = count, tc_sd[1..3] = packed SMPTE codes. */
1272  tc_sd = (uint32_t*)tcside->data;
1273  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1274 
1275  for (int i = 0; i < tc_sd[0]; i++) {
1276  int drop = h->sei.picture_timing.timecode[i].dropframe;
1277  int hh = h->sei.picture_timing.timecode[i].hours;
1278  int mm = h->sei.picture_timing.timecode[i].minutes;
1279  int ss = h->sei.picture_timing.timecode[i].seconds;
1280  int ff = h->sei.picture_timing.timecode[i].frame;
1281 
1282  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1283  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1284  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1285  }
1286  }
 /* Consume the timecodes so they are not re-exported on the next frame. */
1287  h->sei.picture_timing.timecode_cnt = 0;
1288  }
1289 
1290  return 0;
1291 }
1292 
/* Insert the just-decoded picture into the delayed-picture (reorder) buffer
 * and pick the next frame to output in display order, growing has_b_frames
 * when the stream turns out to need more reordering. Returns 0.
 * NOTE(review): the signature line (doxygen 1293) was lost in extraction;
 * presumably "static int h264_select_output_frame(H264Context *h)". */
1294 {
1295  const SPS *sps = h->ps.sps;
1296  H264Picture *out = h->cur_pic_ptr;
1297  H264Picture *cur = h->cur_pic_ptr;
1298  int i, pics, out_of_order, out_idx;
1299 
1300  cur->mmco_reset = h->mmco_reset;
1301  h->mmco_reset = 0;
1302 
1303  if (sps->bitstream_restriction_flag ||
1304  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1305  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1306  }
1307 
 /* Insert cur->poc into the sorted last_pocs[] history; i ends up as the
  * insertion depth, used below to estimate the needed reorder delay. */
1308  for (i = 0; 1; i++) {
1309  if(i == H264_MAX_DPB_FRAMES || cur->poc < h->last_pocs[i]){
1310  if(i)
1311  h->last_pocs[i-1] = cur->poc;
1312  break;
1313  } else if(i) {
1314  h->last_pocs[i-1]= h->last_pocs[i];
1315  }
1316  }
1317  out_of_order = H264_MAX_DPB_FRAMES - i;
1318  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1319  || (h->last_pocs[H264_MAX_DPB_FRAMES-2] > INT_MIN && h->last_pocs[H264_MAX_DPB_FRAMES-1] - (int64_t)h->last_pocs[H264_MAX_DPB_FRAMES-2] > 2))
1320  out_of_order = FFMAX(out_of_order, 1);
1321  if (out_of_order == H264_MAX_DPB_FRAMES) {
 /* POC went completely backwards: treat it like an MMCO reset. */
1322  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1323  for (i = 1; i < H264_MAX_DPB_FRAMES; i++)
1324  h->last_pocs[i] = INT_MIN;
1325  h->last_pocs[0] = cur->poc;
1326  cur->mmco_reset = 1;
1327  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1328  int loglevel = h->avctx->frame_num > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1329  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1330  h->avctx->has_b_frames = out_of_order;
1331  }
1332 
1333  pics = 0;
1334  while (h->delayed_pic[pics])
1335  pics++;
1336 
 /* NOTE(review): an assertion on pics (doxygen 1337) was dropped here. */
1338 
1339  h->delayed_pic[pics++] = cur;
1340  if (cur->reference == 0)
1341  cur->reference = DELAYED_PIC_REF;
1342 
 /* Find the lowest-POC delayed picture, stopping at keyframes/MMCO resets
  * since nothing before them may be reordered past them. */
1343  out = h->delayed_pic[0];
1344  out_idx = 0;
1345  for (i = 1; h->delayed_pic[i] &&
1346  !(h->delayed_pic[i]->f->flags & AV_FRAME_FLAG_KEY) &&
1347  !h->delayed_pic[i]->mmco_reset;
1348  i++)
1349  if (h->delayed_pic[i]->poc < out->poc) {
1350  out = h->delayed_pic[i];
1351  out_idx = i;
1352  }
1353  if (h->avctx->has_b_frames == 0 &&
1354  ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset))
1355  h->next_outputed_poc = INT_MIN;
1356  out_of_order = out->poc < h->next_outputed_poc;
1357 
1358  if (out_of_order || pics > h->avctx->has_b_frames) {
1359  out->reference &= ~DELAYED_PIC_REF;
1360  for (i = out_idx; h->delayed_pic[i]; i++)
1361  h->delayed_pic[i] = h->delayed_pic[i + 1];
1362  }
1363  if (!out_of_order && pics > h->avctx->has_b_frames) {
1364  h->next_output_pic = out;
1365  if (out_idx == 0 && h->delayed_pic[0] && ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset)) {
1366  h->next_outputed_poc = INT_MIN;
1367  } else
1368  h->next_outputed_poc = out->poc;
1369 
1370  // We have reached an recovery point and all frames after it in
1371  // display order are "recovered".
1372  h->frame_recovered |= out->recovered;
1373 
1374  out->recovered |= h->frame_recovered & FRAME_RECOVERED_SEI;
1375 
1376  if (!out->recovered) {
 /* Suppress unrecovered frames unless the user explicitly asked for
  * corrupt/early output; otherwise tag them as corrupt. */
1377  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1378  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1379  h->next_output_pic = NULL;
1380  } else {
1381  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1382  }
1383  }
1384  } else {
1385  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1386  }
1387 
1388  return 0;
1389 }
1390 
1391 /* This function is called right after decoding the slice header for a first
1392  * slice in a field (or a frame). It decides whether we are decoding a new frame
1393  * or a second field in a pair and does the necessary setup.
1394  */
/* NOTE(review): the first signature line (doxygen 1395) was lost in
 * extraction; presumably "static int h264_field_start(H264Context *h,
 * const H264SliceContext *sl," continuing below. Several call statements
 * (doxygen 1527, 1619, 1682, 1686) are also missing — restore from
 * upstream before relying on this text. */
1396  const H2645NAL *nal, int first_slice)
1397 {
1398  int i;
1399  const SPS *sps;
1400 
1401  int last_pic_structure, last_pic_droppable, ret;
1402 
 /* Activate PPS/SPS for this picture; may reinit the whole context. */
1403  ret = h264_init_ps(h, sl, first_slice);
1404  if (ret < 0)
1405  return ret;
1406 
1407  sps = h->ps.sps;
1408 
1409  if (sps->bitstream_restriction_flag &&
1410  h->avctx->has_b_frames < sps->num_reorder_frames) {
1411  h->avctx->has_b_frames = sps->num_reorder_frames;
1412  }
1413 
 /* Remember the previous picture's structure/droppability so we can
  * restore them on error and pair fields below. */
1414  last_pic_droppable = h->droppable;
1415  last_pic_structure = h->picture_structure;
1416  h->droppable = (nal->ref_idc == 0);
1417  h->picture_structure = sl->picture_structure;
1418 
1419  h->poc.frame_num = sl->frame_num;
1420  h->poc.poc_lsb = sl->poc_lsb;
1421  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1422  h->poc.delta_poc[0] = sl->delta_poc[0];
1423  h->poc.delta_poc[1] = sl->delta_poc[1];
1424 
1425  if (nal->type == H264_NAL_IDR_SLICE)
1426  h->poc_offset = sl->idr_pic_id;
1427  else if (h->picture_intra_only)
1428  h->poc_offset = 0;
1429 
1430  /* Shorten frame num gaps so we don't have to allocate reference
1431  * frames just to throw them away */
1432  if (h->poc.frame_num != h->poc.prev_frame_num) {
1433  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1434  int max_frame_num = 1 << sps->log2_max_frame_num;
1435 
1436  if (unwrap_prev_frame_num > h->poc.frame_num)
1437  unwrap_prev_frame_num -= max_frame_num;
1438 
1439  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1440  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1441  if (unwrap_prev_frame_num < 0)
1442  unwrap_prev_frame_num += max_frame_num;
1443 
1444  h->poc.prev_frame_num = unwrap_prev_frame_num;
1445  }
1446  }
1447 
1448  /* See if we have a decoded first field looking for a pair...
1449  * Here, we're using that to see if we should mark previously
1450  * decode frames as "finished".
1451  * We have to do that before the "dummy" in-between frame allocation,
1452  * since that can modify h->cur_pic_ptr. */
1453  if (h->first_field) {
1454  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1455  av_assert0(h->cur_pic_ptr);
1456  av_assert0(h->cur_pic_ptr->f->buf[0]);
1457  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1458 
1459  /* Mark old field/frame as completed */
1460  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1461  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1462  }
1463 
1464  /* figure out if we have a complementary field pair */
1465  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1466  /* Previous field is unmatched. Don't display it, but let it
1467  * remain for reference if marked as such. */
1468  if (last_pic_structure != PICT_FRAME) {
1469  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1470  last_pic_structure == PICT_TOP_FIELD);
1471  }
1472  } else {
1473  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1474  /* This and previous field were reference, but had
1475  * different frame_nums. Consider this field first in
1476  * pair. Throw away previous field except for reference
1477  * purposes. */
1478  if (last_pic_structure != PICT_FRAME) {
1479  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1480  last_pic_structure == PICT_TOP_FIELD);
1481  }
1482  } else {
1483  /* Second field in complementary pair */
1484  if (!((last_pic_structure == PICT_TOP_FIELD &&
1485  h->picture_structure == PICT_BOTTOM_FIELD) ||
1486  (last_pic_structure == PICT_BOTTOM_FIELD &&
1487  h->picture_structure == PICT_TOP_FIELD))) {
1488  av_log(h->avctx, AV_LOG_ERROR,
1489  "Invalid field mode combination %d/%d\n",
1490  last_pic_structure, h->picture_structure);
1491  h->picture_structure = last_pic_structure;
1492  h->droppable = last_pic_droppable;
1493  return AVERROR_INVALIDDATA;
1494  } else if (last_pic_droppable != h->droppable) {
1495  avpriv_request_sample(h->avctx,
1496  "Found reference and non-reference fields in the same frame, which");
1497  h->picture_structure = last_pic_structure;
1498  h->droppable = last_pic_droppable;
1499  return AVERROR_PATCHWELCOME;
1500  }
1501  }
1502  }
1503  }
1504 
 /* Fill frame_num gaps with dummy frames (see comment above): each missing
  * frame_num gets a synthesized picture so reference handling stays sane. */
1505  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1506  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1507  const H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1508  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1509  h->poc.frame_num, h->poc.prev_frame_num);
1510  if (!sps->gaps_in_frame_num_allowed_flag)
1511  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1512  h->last_pocs[i] = INT_MIN;
1513  ret = h264_frame_start(h);
1514  if (ret < 0) {
1515  h->first_field = 0;
1516  return ret;
1517  }
1518 
1519  h->poc.prev_frame_num++;
1520  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1521  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1522  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
1523  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1524  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1525 
1526  h->explicit_ref_marking = 0;
 /* NOTE(review): the ref-marking call assigning ret (doxygen 1527) was
  * dropped here. */
1528  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1529  return ret;
1530  /* Error concealment: If a ref is missing, copy the previous ref
1531  * in its place.
1532  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1533  * many assumptions about there being no actual duplicates.
1534  * FIXME: This does not copy padding for out-of-frame motion
1535  * vectors. Given we are concealing a lost frame, this probably
1536  * is not noticeable by comparison, but it should be fixed. */
1537  if (h->short_ref_count) {
 /* Mid-grey fill color per plane for the fallback path below. */
1538  int c[4] = {
1539  1<<(h->ps.sps->bit_depth_luma-1),
1540  1<<(h->ps.sps->bit_depth_chroma-1),
1541  1<<(h->ps.sps->bit_depth_chroma-1),
1542  -1
1543  };
1544 
1545  if (prev &&
1546  h->short_ref[0]->f->width == prev->f->width &&
1547  h->short_ref[0]->f->height == prev->f->height &&
1548  h->short_ref[0]->f->format == prev->f->format) {
1549  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1550  if (prev->field_picture)
1551  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1552  ff_thread_release_ext_buffer(&h->short_ref[0]->tf);
1553  h->short_ref[0]->tf.f = h->short_ref[0]->f;
1554  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
1555  if (ret < 0)
1556  return ret;
1557  h->short_ref[0]->poc = prev->poc + 2U;
1558  h->short_ref[0]->gray = prev->gray;
1559  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
1560  if (h->short_ref[0]->field_picture)
1561  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
1562  } else if (!h->frame_recovered) {
1563  if (!h->avctx->hwaccel)
1564  color_frame(h->short_ref[0]->f, c);
1565  h->short_ref[0]->gray = 1;
1566  }
1567  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1568  }
1569  }
1570 
1571  /* See if we have a decoded first field looking for a pair...
1572  * We're using that to see whether to continue decoding in that
1573  * frame, or to allocate a new one. */
1574  if (h->first_field) {
1575  av_assert0(h->cur_pic_ptr);
1576  av_assert0(h->cur_pic_ptr->f->buf[0]);
1577  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1578 
1579  /* figure out if we have a complementary field pair */
1580  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1581  /* Previous field is unmatched. Don't display it, but let it
1582  * remain for reference if marked as such. */
1583  h->missing_fields ++;
1584  h->cur_pic_ptr = NULL;
1585  h->first_field = FIELD_PICTURE(h);
1586  } else {
1587  h->missing_fields = 0;
1588  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1589  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1590  h->picture_structure==PICT_BOTTOM_FIELD);
1591  /* This and the previous field had different frame_nums.
1592  * Consider this field first in pair. Throw away previous
1593  * one except for reference purposes. */
1594  h->first_field = 1;
1595  h->cur_pic_ptr = NULL;
1596  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1597  /* This frame was already output, we cannot draw into it
1598  * anymore.
1599  */
1600  h->first_field = 1;
1601  h->cur_pic_ptr = NULL;
1602  } else {
1603  /* Second field in complementary pair */
1604  h->first_field = 0;
1605  }
1606  }
1607  } else {
1608  /* Frame or first field in a potentially complementary pair */
1609  h->first_field = FIELD_PICTURE(h);
1610  }
1611 
1612  if (!FIELD_PICTURE(h) || h->first_field) {
1613  if (h264_frame_start(h) < 0) {
1614  h->first_field = 0;
1615  return AVERROR_INVALIDDATA;
1616  }
1617  } else {
 /* Second field: reuse the first field's picture buffer. */
1618  int field = h->picture_structure == PICT_BOTTOM_FIELD;
 /* NOTE(review): a statement (doxygen 1619) was dropped here. */
1620  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1621  }
1622  /* Some macroblocks can be accessed before they're available in case
1623  * of lost slices, MBAFF or threading. */
1624  if (FIELD_PICTURE(h)) {
1625  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1626  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1627  } else {
1628  memset(h->slice_table, -1,
1629  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1630  }
1631 
1632  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1633  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1634  if (ret < 0)
1635  return ret;
1636 
 /* Adopt the slice's memory-management control operations for the picture. */
1637  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1638  h->nb_mmco = sl->nb_mmco;
1639  h->explicit_ref_marking = sl->explicit_ref_marking;
1640 
1641  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1642 
1643  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1644  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1645 
1646  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1647  h->valid_recovery_point = 1;
1648 
1649  if ( h->recovery_frame < 0
1650  || av_zero_extend(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1651  h->recovery_frame = av_zero_extend(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1652 
1653  if (!h->valid_recovery_point)
1654  h->recovery_frame = h->poc.frame_num;
1655  }
1656  }
1657 
1658  h->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_KEY * !!(nal->type == H264_NAL_IDR_SLICE);
1659 
1660  if (nal->type == H264_NAL_IDR_SLICE) {
1661  h->cur_pic_ptr->recovered |= FRAME_RECOVERED_IDR;
1662  // If we have an IDR, all frames after it in decoded order are
1663  // "recovered".
1664  h->frame_recovered |= FRAME_RECOVERED_IDR;
1665  }
1666 
1667  if (h->recovery_frame == h->poc.frame_num && nal->ref_idc) {
1668  h->recovery_frame = -1;
1669  h->cur_pic_ptr->recovered |= FRAME_RECOVERED_SEI;
1670  }
1671 
1672 #if 1
1673  h->cur_pic_ptr->recovered |= h->frame_recovered;
1674 #else
1675  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1676 #endif
1677 
1678  /* Set the frame properties/side data. Only done for the second field in
1679  * field coded frames, since some SEI information is present for each field
1680  * and is merged by the SEI parsing code. */
1681  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
 /* NOTE(review): the call assigning ret (doxygen 1682) was dropped here. */
1683  if (ret < 0)
1684  return ret;
1685 
 /* NOTE(review): the call assigning ret (doxygen 1686) was dropped here. */
1687  if (ret < 0)
1688  return ret;
1689  }
1690 
1691  return 0;
1692 }
1693 
/* Parse the slice header bitstream fields into the slice context: slice
 * type, PPS id, frame_num, field flags, POC fields, reference counts,
 * weighted-prediction tables, ref-pic marking, CABAC init, QP and
 * deblocking parameters. Returns 0 on success or a negative error code.
 * NOTE(review): the first signature line (doxygen 1694) was lost in
 * extraction; presumably "static int h264_slice_header_parse(const
 * H264Context *h, H264SliceContext *sl," continuing below. Several interior
 * lines (doxygen 1728, 1816, 1823, 1837-1838, 1874) are also missing —
 * restore from upstream before relying on this text. */
1695  const H2645NAL *nal)
1696 {
1697  const SPS *sps;
1698  const PPS *pps;
1699  int ret;
1700  unsigned int slice_type, tmp, i;
1701  int field_pic_flag, bottom_field_flag;
1702  int first_slice = sl == h->slice_ctx && !h->current_slice;
1703  int picture_structure;
1704 
1705  if (first_slice)
1706  av_assert0(!h->setup_finished);
1707 
1708  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1709 
1710  slice_type = get_ue_golomb_31(&sl->gb);
1711  if (slice_type > 9) {
1712  av_log(h->avctx, AV_LOG_ERROR,
1713  "slice type %d too large at %d\n",
1714  slice_type, sl->first_mb_addr);
1715  return AVERROR_INVALIDDATA;
1716  }
 /* Slice types 5..9 mean "all slices in this picture have this type". */
1717  if (slice_type > 4) {
1718  slice_type -= 5;
1719  sl->slice_type_fixed = 1;
1720  } else
1721  sl->slice_type_fixed = 0;
1722 
1723  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1724  sl->slice_type = slice_type;
 /* slice_type_nos folds SP->P and SI->I ("no switching"). */
1725  sl->slice_type_nos = slice_type & 3;
1726 
 /* NOTE(review): the second half of this condition (doxygen 1728) was
  * dropped here. */
1727  if (nal->type == H264_NAL_IDR_SLICE &&
1729  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1730  return AVERROR_INVALIDDATA;
1731  }
1732 
1733  sl->pps_id = get_ue_golomb(&sl->gb);
1734  if (sl->pps_id >= MAX_PPS_COUNT) {
1735  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1736  return AVERROR_INVALIDDATA;
1737  }
1738  if (!h->ps.pps_list[sl->pps_id]) {
1739  av_log(h->avctx, AV_LOG_ERROR,
1740  "non-existing PPS %u referenced\n",
1741  sl->pps_id);
1742  return AVERROR_INVALIDDATA;
1743  }
1744  pps = h->ps.pps_list[sl->pps_id];
1745  sps = pps->sps;
1746 
1747  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must carry the same frame_num. */
1748  if (!first_slice) {
1749  if (h->poc.frame_num != sl->frame_num) {
1750  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1751  h->poc.frame_num, sl->frame_num);
1752  return AVERROR_INVALIDDATA;
1753  }
1754  }
1755 
1756  sl->mb_mbaff = 0;
1757 
1758  if (sps->frame_mbs_only_flag) {
1759  picture_structure = PICT_FRAME;
1760  } else {
1761  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1762  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1763  return -1;
1764  }
1765  field_pic_flag = get_bits1(&sl->gb);
1766  if (field_pic_flag) {
1767  bottom_field_flag = get_bits1(&sl->gb);
1768  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1769  } else {
1770  picture_structure = PICT_FRAME;
1771  }
1772  }
1773  sl->picture_structure = picture_structure;
1774  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1775 
 /* Picture numbering: frames use frame_num, fields use 2*frame_num+1. */
1776  if (picture_structure == PICT_FRAME) {
1777  sl->curr_pic_num = sl->frame_num;
1778  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1779  } else {
1780  sl->curr_pic_num = 2 * sl->frame_num + 1;
1781  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1782  }
1783 
1784  if (nal->type == H264_NAL_IDR_SLICE) {
1785  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1786  if (idr_pic_id < 65536) {
1787  sl->idr_pic_id = idr_pic_id;
1788  } else
1789  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1790  }
1791 
1792  sl->poc_lsb = 0;
1793  sl->delta_poc_bottom = 0;
1794  if (sps->poc_type == 0) {
1795  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1796 
1797  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1798  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1799  }
1800 
1801  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1802  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1803  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1804 
1805  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1806  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1807  }
1808 
1809  sl->redundant_pic_count = 0;
1810  if (pps->redundant_pic_cnt_present)
1811  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1812 
1813  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1814  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1815 
 /* NOTE(review): the call whose arguments follow (doxygen 1816,
  * presumably the reference-count parsing helper) was dropped here. */
1817  &sl->gb, pps, sl->slice_type_nos,
1818  picture_structure, h->avctx);
1819  if (ret < 0)
1820  return ret;
1821 
1822  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): the call assigning ret (doxygen 1823) was dropped here. */
1824  if (ret < 0) {
1825  sl->ref_count[1] = sl->ref_count[0] = 0;
1826  return ret;
1827  }
1828  }
1829 
1830  sl->pwt.use_weight = 0;
1831  for (i = 0; i < 2; i++) {
1832  sl->pwt.luma_weight_flag[i] = 0;
1833  sl->pwt.chroma_weight_flag[i] = 0;
1834  }
 /* Explicit weighted prediction tables for P slices (weighted_pred) and
  * B slices (weighted_bipred_idc == 1). */
1835  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1836  (pps->weighted_bipred_idc == 1 &&
 /* NOTE(review): the rest of this condition and the call start
  * (doxygen 1837-1838) were dropped here. */
1839  sl->slice_type_nos, &sl->pwt,
1840  picture_structure, h->avctx);
1841  if (ret < 0)
1842  return ret;
1843  }
1844 
1845  sl->explicit_ref_marking = 0;
1846  if (nal->ref_idc) {
1847  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1848  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1849  return AVERROR_INVALIDDATA;
1850  }
1851 
1852  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1853  tmp = get_ue_golomb_31(&sl->gb);
1854  if (tmp > 2) {
1855  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1856  return AVERROR_INVALIDDATA;
1857  }
1858  sl->cabac_init_idc = tmp;
1859  }
1860 
1861  sl->last_qscale_diff = 0;
1862  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
 /* QP range scales with bit depth: 0..51 at 8 bits, +6 per extra bit. */
1863  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1864  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1865  return AVERROR_INVALIDDATA;
1866  }
1867  sl->qscale = tmp;
1868  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1869  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1870  // FIXME qscale / qp ... stuff
1871  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1872  get_bits1(&sl->gb); /* sp_for_switch_flag */
 /* NOTE(review): the second half of this condition (doxygen 1874) was
  * dropped here. */
1873  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1875  get_se_golomb(&sl->gb); /* slice_qs_delta */
1876 
1877  sl->deblocking_filter = 1;
1878  sl->slice_alpha_c0_offset = 0;
1879  sl->slice_beta_offset = 0;
1880  if (pps->deblocking_filter_parameters_present) {
1881  tmp = get_ue_golomb_31(&sl->gb);
1882  if (tmp > 2) {
1883  av_log(h->avctx, AV_LOG_ERROR,
1884  "deblocking_filter_idc %u out of range\n", tmp);
1885  return AVERROR_INVALIDDATA;
1886  }
 /* Bitstream idc: 0 = on, 1 = off, 2 = on-but-not-across-slices;
  * internally 0 and 1 are swapped so 1 means "filter enabled". */
1887  sl->deblocking_filter = tmp;
1888  if (sl->deblocking_filter < 2)
1889  sl->deblocking_filter ^= 1; // 1<->0
1890 
1891  if (sl->deblocking_filter) {
1892  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1893  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1894  if (slice_alpha_c0_offset_div2 > 6 ||
1895  slice_alpha_c0_offset_div2 < -6 ||
1896  slice_beta_offset_div2 > 6 ||
1897  slice_beta_offset_div2 < -6) {
1898  av_log(h->avctx, AV_LOG_ERROR,
1899  "deblocking filter parameters %d %d out of range\n",
1900  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1901  return AVERROR_INVALIDDATA;
1902  }
1903  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1904  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1905  }
1906  }
1907 
1908  return 0;
1909 }
1910 
1911 /* do all the per-slice initialization needed before we can start decoding the
1912  * actual MBs */
1914  const H2645NAL *nal)
1915 {
1916  int i, j, ret = 0;
1917 
1918  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1919  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1920  return AVERROR_INVALIDDATA;
1921  }
1922 
1923  av_assert1(h->mb_num == h->mb_width * h->mb_height);
1924  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1925  sl->first_mb_addr >= h->mb_num) {
1926  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1927  return AVERROR_INVALIDDATA;
1928  }
1929  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1930  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
1932  if (h->picture_structure == PICT_BOTTOM_FIELD)
1933  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1934  av_assert1(sl->mb_y < h->mb_height);
1935 
1936  ret = ff_h264_build_ref_list(h, sl);
1937  if (ret < 0)
1938  return ret;
1939 
1940  if (h->ps.pps->weighted_bipred_idc == 2 &&
1942  implicit_weight_table(h, sl, -1);
1943  if (FRAME_MBAFF(h)) {
1944  implicit_weight_table(h, sl, 0);
1945  implicit_weight_table(h, sl, 1);
1946  }
1947  }
1948 
1951  if (!h->setup_finished)
1953 
1954  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1955  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1956  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1957  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1959  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1961  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1962  nal->ref_idc == 0))
1963  sl->deblocking_filter = 0;
1964 
1965  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
1966  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1967  /* Cheat slightly for speed:
1968  * Do not bother to deblock across slices. */
1969  sl->deblocking_filter = 2;
1970  } else {
1971  h->postpone_filter = 1;
1972  }
1973  }
1974  sl->qp_thresh = 15 -
1976  FFMAX3(0,
1977  h->ps.pps->chroma_qp_index_offset[0],
1978  h->ps.pps->chroma_qp_index_offset[1]) +
1979  6 * (h->ps.sps->bit_depth_luma - 8);
1980 
1981  sl->slice_num = ++h->current_slice;
1982 
1983  if (sl->slice_num)
1984  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
1985  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
1986  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
1987  && sl->slice_num >= MAX_SLICES) {
1988  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
1989  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
1990  }
1991 
1992  for (j = 0; j < 2; j++) {
1993  int id_list[16];
1994  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
1995  for (i = 0; i < 16; i++) {
1996  id_list[i] = 60;
1997  if (j < sl->list_count && i < sl->ref_count[j] &&
1998  sl->ref_list[j][i].parent->f->buf[0]) {
1999  int k;
2000  const AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
2001  for (k = 0; k < h->short_ref_count; k++)
2002  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2003  id_list[i] = k;
2004  break;
2005  }
2006  for (k = 0; k < h->long_ref_count; k++)
2007  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2008  id_list[i] = h->short_ref_count + k;
2009  break;
2010  }
2011  }
2012  }
2013 
2014  ref2frm[0] =
2015  ref2frm[1] = -1;
2016  for (i = 0; i < 16; i++)
2017  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2018  ref2frm[18 + 0] =
2019  ref2frm[18 + 1] = -1;
2020  for (i = 16; i < 48; i++)
2021  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2022  (sl->ref_list[j][i].reference & 3);
2023  }
2024 
2025  if (sl->slice_type_nos == AV_PICTURE_TYPE_I) {
2026  h->cur_pic_ptr->gray = 0;
2027  h->non_gray = 1;
2028  } else {
2029  int gray = 0;
2030  for (j = 0; j < sl->list_count; j++) {
2031  for (i = 0; i < sl->ref_count[j]; i++) {
2032  gray |= sl->ref_list[j][i].parent->gray;
2033  }
2034  }
2035  h->cur_pic_ptr->gray = gray;
2036  }
2037 
2038  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2039  av_log(h->avctx, AV_LOG_DEBUG,
2040  "slice:%d %c mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2041  sl->slice_num,
2042  (h->picture_structure == PICT_FRAME ? 'F' : h->picture_structure == PICT_TOP_FIELD ? 'T' : 'B'),
2043  sl->mb_y * h->mb_width + sl->mb_x,
2045  sl->slice_type_fixed ? " fix" : "",
2046  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2047  h->poc.frame_num,
2048  h->cur_pic_ptr->field_poc[0],
2049  h->cur_pic_ptr->field_poc[1],
2050  sl->ref_count[0], sl->ref_count[1],
2051  sl->qscale,
2052  sl->deblocking_filter,
2054  sl->pwt.use_weight,
2055  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2056  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2057  }
2058 
2059  return 0;
2060 }
2061 
2063 {
2064  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2065  int first_slice = sl == h->slice_ctx && !h->current_slice;
2066  int ret;
2067 
2068  sl->gb = nal->gb;
2069 
2070  ret = h264_slice_header_parse(h, sl, nal);
2071  if (ret < 0)
2072  return ret;
2073 
2074  // discard redundant pictures
2075  if (sl->redundant_pic_count > 0) {
2076  sl->ref_count[0] = sl->ref_count[1] = 0;
2077  return 0;
2078  }
2079 
2080  if (sl->first_mb_addr == 0 || !h->current_slice) {
2081  if (h->setup_finished) {
2082  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2083  return AVERROR_INVALIDDATA;
2084  }
2085  }
2086 
2087  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2088  if (h->current_slice) {
2089  // this slice starts a new field
2090  // first decode any pending queued slices
2091  if (h->nb_slice_ctx_queued) {
2092  H264SliceContext tmp_ctx;
2093 
2095  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2096  return ret;
2097 
2098  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2099  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2100  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2101  sl = h->slice_ctx;
2102  }
2103 
2104  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2105  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2106  if (ret < 0)
2107  return ret;
2108  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2109  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2110  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2111  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2112  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2113  h->cur_pic_ptr = NULL;
2114  if (ret < 0)
2115  return ret;
2116  } else
2117  return AVERROR_INVALIDDATA;
2118  }
2119 
2120  if (!h->first_field) {
2121  if (h->cur_pic_ptr && !h->droppable) {
2122  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2123  h->picture_structure == PICT_BOTTOM_FIELD);
2124  }
2125  h->cur_pic_ptr = NULL;
2126  }
2127  }
2128 
2129  if (!h->current_slice)
2130  av_assert0(sl == h->slice_ctx);
2131 
2132  if (h->current_slice == 0 && !h->first_field) {
2133  if (
2134  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2135  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2136  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2137  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2138  h->avctx->skip_frame >= AVDISCARD_ALL) {
2139  return 0;
2140  }
2141  }
2142 
2143  if (!first_slice) {
2144  const PPS *pps = h->ps.pps_list[sl->pps_id];
2145 
2146  if (h->ps.pps->sps_id != pps->sps_id ||
2147  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2148  (h->setup_finished && h->ps.pps != pps)*/) {
2149  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2150  return AVERROR_INVALIDDATA;
2151  }
2152  if (h->ps.sps != pps->sps) {
2153  av_log(h->avctx, AV_LOG_ERROR,
2154  "SPS changed in the middle of the frame\n");
2155  return AVERROR_INVALIDDATA;
2156  }
2157  }
2158 
2159  if (h->current_slice == 0) {
2160  ret = h264_field_start(h, sl, nal, first_slice);
2161  if (ret < 0)
2162  return ret;
2163  } else {
2164  if (h->picture_structure != sl->picture_structure ||
2165  h->droppable != (nal->ref_idc == 0)) {
2166  av_log(h->avctx, AV_LOG_ERROR,
2167  "Changing field mode (%d -> %d) between slices is not allowed\n",
2168  h->picture_structure, sl->picture_structure);
2169  return AVERROR_INVALIDDATA;
2170  } else if (!h->cur_pic_ptr) {
2171  av_log(h->avctx, AV_LOG_ERROR,
2172  "unset cur_pic_ptr on slice %d\n",
2173  h->current_slice + 1);
2174  return AVERROR_INVALIDDATA;
2175  }
2176  }
2177 
2178  ret = h264_slice_init(h, sl, nal);
2179  if (ret < 0)
2180  return ret;
2181 
2182  h->nb_slice_ctx_queued++;
2183 
2184  return 0;
2185 }
2186 
2188 {
 /* Translate the slice's AVPictureType into the small integer code used
  * for slice_type in the bitstream: P=0, B=1, I=2, SP=3, SI=4.
  * NOTE(review): the signature line was lost in extraction; per the
  * surrounding file this is ff_h264_get_slice_type(const H264SliceContext *sl). */
2189  switch (sl->slice_type) {
2190  case AV_PICTURE_TYPE_P:
2191  return 0;
2192  case AV_PICTURE_TYPE_B:
2193  return 1;
2194  case AV_PICTURE_TYPE_I:
2195  return 2;
2196  case AV_PICTURE_TYPE_SP:
2197  return 3;
2198  case AV_PICTURE_TYPE_SI:
2199  return 4;
2200  default:
 /* Any other picture type has no H.264 slice_type encoding. */
2201  return AVERROR_INVALIDDATA;
2202  }
2203 }
2204 
 /* Fill the per-macroblock motion-vector and reference caches for one
  * reference list, as needed by the loop filter.
  * NOTE(review): the first signature line was lost in extraction; this is
  * the static helper fill_filter_caches_inter(const H264Context *h, ...). */
2206  H264SliceContext *sl,
2207  int mb_type, int top_xy,
2208  const int left_xy[LEFT_MBS],
2209  int top_type,
2210  const int left_type[LEFT_MBS],
2211  int mb_xy, int list)
2212 {
2213  int b_stride = h->b_stride;
2214  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2215  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
 /* Import the top and left neighbours' MVs/refs into the cache edges,
  * remapping reference indices through the owning slice's ref2frm table. */
2216  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
2217  if (USES_LIST(top_type, list)) {
2218  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2219  const int b8_xy = 4 * top_xy + 2;
2220  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2221  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2222  ref_cache[0 - 1 * 8] =
2223  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2224  ref_cache[2 - 1 * 8] =
2225  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2226  } else {
 /* Top neighbour does not use this list: zero MVs, mark refs unused. */
2227  AV_ZERO128(mv_dst - 1 * 8);
2228  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2229  }
2230 
 /* Left neighbour is only usable when its field/frame coding matches ours. */
2231  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2232  if (USES_LIST(left_type[LTOP], list)) {
2233  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2234  const int b8_xy = 4 * left_xy[LTOP] + 1;
2235  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2236  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2237  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2238  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2239  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2240  ref_cache[-1 + 0] =
2241  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2242  ref_cache[-1 + 16] =
2243  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2244  } else {
2245  AV_ZERO32(mv_dst - 1 + 0);
2246  AV_ZERO32(mv_dst - 1 + 8);
2247  AV_ZERO32(mv_dst - 1 + 16);
2248  AV_ZERO32(mv_dst - 1 + 24);
2249  ref_cache[-1 + 0] =
2250  ref_cache[-1 + 8] =
2251  ref_cache[-1 + 16] =
2252  ref_cache[-1 + 24] = LIST_NOT_USED;
2253  }
2254  }
2255  }
2256 
 /* Current MB does not use this list: fill the whole 4x4 cache with
  * zero MVs and "list not used" markers, nothing more to do. */
2257  if (!USES_LIST(mb_type, list)) {
2258  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2259  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2260  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2261  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2262  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2263  return;
2264  }
2265 
 /* Broadcast the four 8x8-block reference indices (remapped through this
  * slice's ref2frm table) across the 4x4 cache rows; the 0x0101 multiply
  * duplicates each byte pairwise. */
2266  {
2267  const int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2268  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2269  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2270  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2271  AV_WN32A(&ref_cache[0 * 8], ref01);
2272  AV_WN32A(&ref_cache[1 * 8], ref01);
2273  AV_WN32A(&ref_cache[2 * 8], ref23);
2274  AV_WN32A(&ref_cache[3 * 8], ref23);
2275  }
2276 
 /* Copy the current MB's 4x4 motion vectors row by row into the cache. */
2277  {
2278  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2279  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2280  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2281  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2282  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2283  }
2284 }
2285 
2286 /**
 * Prepare all per-MB caches (neighbour coordinates/types, MV/ref caches,
 * non-zero-count cache) that the deblocking filter reads for one macroblock.
2287  * @return non zero if the loop filter can be skipped
2288  */
2289 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2290 {
2291  const int mb_xy = sl->mb_xy;
2292  int top_xy, left_xy[LEFT_MBS];
2293  int top_type, left_type[LEFT_MBS];
2294  const uint8_t *nnz;
2295  uint8_t *nnz_cache;
2296 
 /* Locate the top and left neighbour MBs; in MBAFF the neighbour indices
  * depend on the field/frame coding of the current and left MB pairs. */
2297  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2298 
2299  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2300  if (FRAME_MBAFF(h)) {
2301  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2302  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2303  if (sl->mb_y & 1) {
2304  if (left_mb_field_flag != curr_mb_field_flag)
2305  left_xy[LTOP] -= h->mb_stride;
2306  } else {
2307  if (curr_mb_field_flag)
 /* Branch-free: adds mb_stride only when bit 7 of the top MB type
  * is clear (the mask is 0 or ~0). */
2308  top_xy += h->mb_stride &
2309  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2310  if (left_mb_field_flag != curr_mb_field_flag)
2311  left_xy[LBOT] += h->mb_stride;
2312  }
2313  }
2314 
2315  sl->top_mb_xy = top_xy;
2316  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2317  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2318  {
2319  /* For sufficiently low qp, filtering wouldn't do anything.
2320  * This is a conservative estimate: could also check beta_offset
2321  * and more accurate chroma_qp. */
2322  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2323  int qp = h->cur_pic.qscale_table[mb_xy];
2324  if (qp <= qp_thresh &&
2325  (left_xy[LTOP] < 0 ||
2326  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2327  (top_xy < 0 ||
2328  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2329  if (!FRAME_MBAFF(h))
2330  return 1;
 /* In MBAFF, also check the bottom-left and the MB above the top
  * neighbour before declaring the filter skippable. */
2331  if ((left_xy[LTOP] < 0 ||
2332  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2333  (top_xy < h->mb_stride ||
2334  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2335  return 1;
2336  }
2337  }
2338 
 /* Mask out neighbours that must not be filtered across: with
  * deblocking_filter == 2 any neighbour from a different slice, otherwise
  * only neighbours that were never decoded (slice_table == 0xFFFF). */
2339  top_type = h->cur_pic.mb_type[top_xy];
2340  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2341  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2342  if (sl->deblocking_filter == 2) {
2343  if (h->slice_table[top_xy] != sl->slice_num)
2344  top_type = 0;
2345  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2346  left_type[LTOP] = left_type[LBOT] = 0;
2347  } else {
2348  if (h->slice_table[top_xy] == 0xFFFF)
2349  top_type = 0;
2350  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2351  left_type[LTOP] = left_type[LBOT] = 0;
2352  }
2353  sl->top_type = top_type;
2354  sl->left_type[LTOP] = left_type[LTOP];
2355  sl->left_type[LBOT] = left_type[LBOT];
2356 
 /* Intra MBs need no MV/ref caches; 0 means "filter this MB". */
2357  if (IS_INTRA(mb_type))
2358  return 0;
2359 
2360  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2361  top_type, left_type, mb_xy, 0);
2362  if (sl->list_count == 2)
2363  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2364  top_type, left_type, mb_xy, 1);
2365 
 /* Load the current MB's non-zero-count rows into the cache, then patch
  * in the bordering values from the top and left neighbours. */
2366  nnz = h->non_zero_count[mb_xy];
2367  nnz_cache = sl->non_zero_count_cache;
2368  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2369  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2370  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2371  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2372  sl->cbp = h->cbp_table[mb_xy];
2373 
2374  if (top_type) {
2375  nnz = h->non_zero_count[top_xy];
2376  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2377  }
2378 
2379  if (left_type[LTOP]) {
2380  nnz = h->non_zero_count[left_xy[LTOP]];
2381  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2382  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2383  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2384  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2385  }
2386 
2387  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2388  * from what the loop filter needs */
2389  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
 /* Rebuild the filter-relevant NNZ flags from the cbp tables for any
  * 8x8-transform MB (top/left neighbours and the current MB). */
2390  if (IS_8x8DCT(top_type)) {
2391  nnz_cache[4 + 8 * 0] =
2392  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2393  nnz_cache[6 + 8 * 0] =
2394  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2395  }
2396  if (IS_8x8DCT(left_type[LTOP])) {
2397  nnz_cache[3 + 8 * 1] =
2398  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2399  }
2400  if (IS_8x8DCT(left_type[LBOT])) {
2401  nnz_cache[3 + 8 * 3] =
2402  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2403  }
2404 
2405  if (IS_8x8DCT(mb_type)) {
2406  nnz_cache[scan8[0]] =
2407  nnz_cache[scan8[1]] =
2408  nnz_cache[scan8[2]] =
2409  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2410 
2411  nnz_cache[scan8[0 + 4]] =
2412  nnz_cache[scan8[1 + 4]] =
2413  nnz_cache[scan8[2 + 4]] =
2414  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2415 
2416  nnz_cache[scan8[0 + 8]] =
2417  nnz_cache[scan8[1 + 8]] =
2418  nnz_cache[scan8[2 + 8]] =
2419  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2420 
2421  nnz_cache[scan8[0 + 12]] =
2422  nnz_cache[scan8[1 + 12]] =
2423  nnz_cache[scan8[2 + 12]] =
2424  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2425  }
2426  }
2427 
2428  return 0;
2429 }
2430 
 /* Run the deblocking filter over the MB columns [start_x, end_x) of the
  * current MB row (both rows of a pair when MBAFF). Saves and restores the
  * slice-context fields it clobbers (mb_x/mb_y/slice_type/chroma_qp). */
2431 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2432 {
2433  uint8_t *dest_y, *dest_cb, *dest_cr;
2434  int linesize, uvlinesize, mb_x, mb_y;
2435  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2436  const int old_slice_type = sl->slice_type;
2437  const int pixel_shift = h->pixel_shift;
2438  const int block_h = 16 >> h->chroma_y_shift;
2439 
 /* Filtering is deferred until all slices are decoded in this mode. */
2440  if (h->postpone_filter)
2441  return;
2442 
2443  if (sl->deblocking_filter) {
2444  for (mb_x = start_x; mb_x < end_x; mb_x++)
 /* MBAFF filters a vertical pair of rows per iteration. */
2445  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2446  int mb_xy, mb_type;
2447  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2448  mb_type = h->cur_pic.mb_type[mb_xy];
2449 
2450  if (FRAME_MBAFF(h))
2451  sl->mb_mbaff =
2452  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2453 
2454  sl->mb_x = mb_x;
2455  sl->mb_y = mb_y;
 /* Compute the luma/chroma destination pointers for this MB. */
2456  dest_y = h->cur_pic.f->data[0] +
2457  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2458  dest_cb = h->cur_pic.f->data[1] +
2459  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2460  mb_y * sl->uvlinesize * block_h;
2461  dest_cr = h->cur_pic.f->data[2] +
2462  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2463  mb_y * sl->uvlinesize * block_h;
2464  // FIXME simplify above
2465 
 /* Field MBs use doubled strides; the bottom field additionally
  * rewinds to the field's first line. */
2466  if (MB_FIELD(sl)) {
2467  linesize = sl->mb_linesize = sl->linesize * 2;
2468  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2469  if (mb_y & 1) { // FIXME move out of this function?
2470  dest_y -= sl->linesize * 15;
2471  dest_cb -= sl->uvlinesize * (block_h - 1);
2472  dest_cr -= sl->uvlinesize * (block_h - 1);
2473  }
2474  } else {
2475  linesize = sl->mb_linesize = sl->linesize;
2476  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2477  }
2478  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2479  uvlinesize, 0);
 /* Non-zero return means the filter is a no-op for this MB. */
2480  if (fill_filter_caches(h, sl, mb_type))
2481  continue;
2482  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2483  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2484 
2485  if (FRAME_MBAFF(h)) {
2486  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2487  linesize, uvlinesize);
2488  } else {
2489  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2490  dest_cr, linesize, uvlinesize);
2491  }
2492  }
2493  }
 /* Restore the slice-context state consumed above. */
2494  sl->slice_type = old_slice_type;
2495  sl->mb_x = end_x;
2496  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2497  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2498  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2499 }
2500 
2502 {
 /* Predict the field-decoding flag for the current MB from the left
  * neighbour if it belongs to this slice, otherwise from the MB above,
  * otherwise default to frame (0).
  * NOTE(review): the signature line was lost in extraction — TODO confirm
  * against the repository source. */
2503  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2504  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2505  h->cur_pic.mb_type[mb_xy - 1] :
2506  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2507  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2508  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2509 }
2510 
2511 /**
2512  * Draw edges and report progress for the last MB row.
 *
 * The band is widened by the deblocking border when the filter is active,
 * then clipped to the picture; progress is only reported when the frame is
 * neither droppable nor already marked with a decoding error.
2513  */
2515 {
2516  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2517  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2518  int height = 16 << FRAME_MBAFF(h);
2519  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2520 
 /* The deblocking filter can still modify rows above this band. */
2521  if (sl->deblocking_filter) {
2522  if ((top + height) >= pic_height)
2523  height += deblock_border;
2524  top -= deblock_border;
2525  }
2526 
2527  if (top >= pic_height || (top + height) < 0)
2528  return;
2529 
 /* Clip the band to [0, pic_height). */
2530  height = FFMIN(height, pic_height - top);
2531  if (top < 0) {
2532  height = top + height;
2533  top = 0;
2534  }
2535 
2536  ff_h264_draw_horiz_band(h, sl, top, height);
2537 
2538  if (h->droppable || h->er.error_occurred)
2539  return;
2540 
2541  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2542  h->picture_structure == PICT_BOTTOM_FIELD);
2543 }
2544 
 /* Forward a decoded slice region to the error-resilience module, unless
  * ER is disabled for this decoder instance.
  * NOTE(review): the first signature line was lost in extraction; the
  * cross-reference index below shows it as
  * static void er_add_slice(H264SliceContext *sl, ...). */
2546  int startx, int starty,
2547  int endx, int endy, int status)
2548 {
2549  if (!sl->h264->enable_er)
2550  return;
2551 
 /* Compile-time gate: dead-code eliminated when ER support is not built. */
2552  if (CONFIG_ERROR_RESILIENCE) {
2553  ff_er_add_slice(sl->er, startx, starty, endx, endy, status);
2554  }
2555 }
2556 
2557 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2558 {
2559  H264SliceContext *sl = arg;
2560  const H264Context *h = sl->h264;
2561  int lf_x_start = sl->mb_x;
2562  int orig_deblock = sl->deblocking_filter;
2563  int ret;
2564 
2565  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2566  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2567 
2568  ret = alloc_scratch_buffers(sl, sl->linesize);
2569  if (ret < 0)
2570  return ret;
2571 
2572  sl->mb_skip_run = -1;
2573 
2574  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2575 
2576  if (h->postpone_filter)
2577  sl->deblocking_filter = 0;
2578 
2579  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2580  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2581 
2582  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && sl->er->error_status_table) {
2583  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2584  if (start_i) {
2585  int prev_status = sl->er->error_status_table[sl->er->mb_index2xy[start_i - 1]];
2586  prev_status &= ~ VP_START;
2587  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2588  sl->er->error_occurred = 1;
2589  }
2590  }
2591 
2592  if (h->ps.pps->cabac) {
2593  /* realign */
2594  align_get_bits(&sl->gb);
2595 
2596  /* init cabac */
2598  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2599  (get_bits_left(&sl->gb) + 7) / 8);
2600  if (ret < 0)
2601  return ret;
2602 
2604 
2605  for (;;) {
2606  int ret, eos;
2607  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2608  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2609  sl->next_slice_idx);
2610  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2611  sl->mb_y, ER_MB_ERROR);
2612  return AVERROR_INVALIDDATA;
2613  }
2614 
2615  ret = ff_h264_decode_mb_cabac(h, sl);
2616 
2617  if (ret >= 0)
2618  ff_h264_hl_decode_mb(h, sl);
2619 
2620  // FIXME optimal? or let mb_decode decode 16x32 ?
2621  if (ret >= 0 && FRAME_MBAFF(h)) {
2622  sl->mb_y++;
2623 
2624  ret = ff_h264_decode_mb_cabac(h, sl);
2625 
2626  if (ret >= 0)
2627  ff_h264_hl_decode_mb(h, sl);
2628  sl->mb_y--;
2629  }
2630  eos = get_cabac_terminate(&sl->cabac);
2631 
2632  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2633  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2634  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2635  sl->mb_y, ER_MB_END);
2636  if (sl->mb_x >= lf_x_start)
2637  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2638  goto finish;
2639  }
2640  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2641  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2642  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2643  av_log(h->avctx, AV_LOG_ERROR,
2644  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2645  sl->mb_x, sl->mb_y,
2646  sl->cabac.bytestream_end - sl->cabac.bytestream);
2647  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2648  sl->mb_y, ER_MB_ERROR);
2649  return AVERROR_INVALIDDATA;
2650  }
2651 
2652  if (++sl->mb_x >= h->mb_width) {
2653  loop_filter(h, sl, lf_x_start, sl->mb_x);
2654  sl->mb_x = lf_x_start = 0;
2655  decode_finish_row(h, sl);
2656  ++sl->mb_y;
2657  if (FIELD_OR_MBAFF_PICTURE(h)) {
2658  ++sl->mb_y;
2659  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2661  }
2662  }
2663 
2664  if (eos || sl->mb_y >= h->mb_height) {
2665  ff_tlog(h->avctx, "slice end %d %d\n",
2666  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2667  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2668  sl->mb_y, ER_MB_END);
2669  if (sl->mb_x > lf_x_start)
2670  loop_filter(h, sl, lf_x_start, sl->mb_x);
2671  goto finish;
2672  }
2673  }
2674  } else {
2675  for (;;) {
2676  int ret;
2677 
2678  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2679  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2680  sl->next_slice_idx);
2681  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2682  sl->mb_y, ER_MB_ERROR);
2683  return AVERROR_INVALIDDATA;
2684  }
2685 
2686  ret = ff_h264_decode_mb_cavlc(h, sl);
2687 
2688  if (ret >= 0)
2689  ff_h264_hl_decode_mb(h, sl);
2690 
2691  // FIXME optimal? or let mb_decode decode 16x32 ?
2692  if (ret >= 0 && FRAME_MBAFF(h)) {
2693  sl->mb_y++;
2694  ret = ff_h264_decode_mb_cavlc(h, sl);
2695 
2696  if (ret >= 0)
2697  ff_h264_hl_decode_mb(h, sl);
2698  sl->mb_y--;
2699  }
2700 
2701  if (ret < 0) {
2702  av_log(h->avctx, AV_LOG_ERROR,
2703  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2704  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2705  sl->mb_y, ER_MB_ERROR);
2706  return ret;
2707  }
2708 
2709  if (++sl->mb_x >= h->mb_width) {
2710  loop_filter(h, sl, lf_x_start, sl->mb_x);
2711  sl->mb_x = lf_x_start = 0;
2712  decode_finish_row(h, sl);
2713  ++sl->mb_y;
2714  if (FIELD_OR_MBAFF_PICTURE(h)) {
2715  ++sl->mb_y;
2716  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2718  }
2719  if (sl->mb_y >= h->mb_height) {
2720  ff_tlog(h->avctx, "slice end %d %d\n",
2721  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2722 
2723  if ( get_bits_left(&sl->gb) == 0
2724  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2725  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2726  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2727 
2728  goto finish;
2729  } else {
2730  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2731  sl->mb_x, sl->mb_y, ER_MB_END);
2732 
2733  return AVERROR_INVALIDDATA;
2734  }
2735  }
2736  }
2737 
2738  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2739  ff_tlog(h->avctx, "slice end %d %d\n",
2740  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2741 
2742  if (get_bits_left(&sl->gb) == 0) {
2743  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2744  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2745  if (sl->mb_x > lf_x_start)
2746  loop_filter(h, sl, lf_x_start, sl->mb_x);
2747 
2748  goto finish;
2749  } else {
2750  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2751  sl->mb_y, ER_MB_ERROR);
2752 
2753  return AVERROR_INVALIDDATA;
2754  }
2755  }
2756  }
2757  }
2758 
2759 finish:
2760  sl->deblocking_filter = orig_deblock;
2761  return 0;
2762 }
2763 
2764 /**
2765  * Call decode_slice() for each context.
2766  *
2767  * @param h h264 master context
2768  */
2770 {
2771  AVCodecContext *const avctx = h->avctx;
2772  H264SliceContext *sl;
2773  int context_count = h->nb_slice_ctx_queued;
2774  int ret = 0;
2775  int i, j;
2776 
2777  h->slice_ctx[0].next_slice_idx = INT_MAX;
2778 
2779  if (h->avctx->hwaccel || context_count < 1)
2780  return 0;
2781 
2782  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2783 
2784  if (context_count == 1) {
2785 
2786  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2787  h->postpone_filter = 0;
2788 
2789  ret = decode_slice(avctx, &h->slice_ctx[0]);
2790  h->mb_y = h->slice_ctx[0].mb_y;
2791  if (ret < 0)
2792  goto finish;
2793  } else {
2794  av_assert0(context_count > 0);
2795  for (i = 0; i < context_count; i++) {
2796  int next_slice_idx = h->mb_width * h->mb_height;
2797  int slice_idx;
2798 
2799  sl = &h->slice_ctx[i];
2800 
2801  /* make sure none of those slices overlap */
2802  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2803  for (j = 0; j < context_count; j++) {
2804  H264SliceContext *sl2 = &h->slice_ctx[j];
2805  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2806 
2807  if (i == j || slice_idx2 < slice_idx)
2808  continue;
2809  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2810  }
2811  sl->next_slice_idx = next_slice_idx;
2812  }
2813 
2814  avctx->execute(avctx, decode_slice, h->slice_ctx,
2815  NULL, context_count, sizeof(h->slice_ctx[0]));
2816 
2817  /* pull back stuff from slices to master context */
2818  sl = &h->slice_ctx[context_count - 1];
2819  h->mb_y = sl->mb_y;
2820 
2821  if (h->postpone_filter) {
2822  h->postpone_filter = 0;
2823 
2824  for (i = 0; i < context_count; i++) {
2825  int y_end, x_end;
2826 
2827  sl = &h->slice_ctx[i];
2828  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2829  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2830 
2831  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2832  sl->mb_y = j;
2833  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2834  j == y_end - 1 ? x_end : h->mb_width);
2835  }
2836  }
2837  }
2838  }
2839 
2840 finish:
2841  h->nb_slice_ctx_queued = 0;
2842  return ret;
2843 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2545
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:416
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:963
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:690
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:234
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:302
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:141
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
av_clip
#define av_clip
Definition: common.h:100
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1056
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:334
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:325
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:91
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1373
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:135
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1169
ff_h264_sei_ctx_replace
static int ff_h264_sei_ctx_replace(H264SEIContext *dst, const H264SEIContext *src)
Definition: h264_sei.h:132
H264Picture::f
AVFrame * f
Definition: h264dec.h:115
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1272
ff_refstruct_pool_alloc
FFRefStructPool * ff_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to ff_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
out
FILE * out
Definition: movenc.c:55
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:247
av_clip_int8
#define av_clip_int8
Definition: common.h:109
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:98
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
ff_h264_ref_picture
int ff_h264_ref_picture(H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:108
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:1013
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:715
H264Picture::ref_index
int8_t * ref_index[2]
RefStruct reference.
Definition: h264dec.h:132
int64_t
long long int64_t
Definition: coverity.c:34
HWACCEL_MAX
#define HWACCEL_MAX
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:64
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:315
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:35
color_frame
static void color_frame(AVFrame *frame, const int c[4])
Definition: h264_slice.c:307
H264Picture::pps
const PPS * pps
Definition: h264dec.h:158
HEVCSEILCEVC::info
AVBufferRef * info
Definition: h2645_sei.h:54
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:111
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:265
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:59
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2501
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::width
int width
Definition: frame.h:461
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:686
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:53
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:337
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:130
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:583
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:525
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2514
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:270
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: defs.h:59
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:791
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:121
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:214
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:71
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:610
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:534
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:472
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
find_unused_picture
static int find_unused_picture(const H264Context *h)
Definition: h264_slice.c:275
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:661
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:30
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:530
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:241
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:427
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:602
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:154
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
HEVCSEILCEVC
Definition: h2645_sei.h:53
thread.h
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:379
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1407
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:233
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:653
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:136
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:239
H264SliceContext
Definition: h264dec.h:180
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:65
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:716
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:301
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:232
USES_LIST
#define USES_LIST(a, list)
Definition: h264dec.h:103
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
finish
static void finish(void)
Definition: movenc.c:374
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:675
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:137
fail
#define fail()
Definition: checkasm.h:188
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1293
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:476
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2993
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:161
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:287
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:458
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
MAX_SLICES
#define MAX_SLICES
Definition: d3d12va_hevc.c:33
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:64
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:484
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:196
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:72
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:118
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:243
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:117
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:70
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:254
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:185
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
refstruct.h
ff_frame_new_side_data_from_buf
int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef **buf)
Similar to ff_frame_new_side_data, but using an existing buffer ref.
Definition: decode.c:2161
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1045
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:481
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:800
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_refstruct_ref_c
const void * ff_refstruct_ref_c(const void *obj)
Analog of ff_refstruct_ref(), but for constant objects.
Definition: refstruct.c:149
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:640
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:627
av_memcpy_backptr
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
Definition: mem.c:447
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
H264Picture::qscale_table_base
int8_t * qscale_table_base
RefStruct reference.
Definition: h264dec.h:120
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2062
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:349
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2275
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:73
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:662
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:431
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:67
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1049
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:606
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:828
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1694
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:75
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:475
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:78
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:197
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:769
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, const uint8_t *src_y, const uint8_t *src_cb, const uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:588
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:186
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:235
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:155
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:218
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
H264Context::enable_er
int enable_er
Definition: h264dec.h:567
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:333
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:845
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
threadframe.h
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:109
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:188
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:642
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:638
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:286
H264Picture::mb_type_base
uint32_t * mb_type_base
RefStruct reference.
Definition: h264dec.h:126
ff_thread_await_progress
void ff_thread_await_progress(const ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread_frame.c:650
H264_MAX_DPB_FRAMES
@ H264_MAX_DPB_FRAMES
Definition: h264.h:76
SPS
Sequence parameter set.
Definition: h264_ps.h:44
H264Ref::parent
const H264Picture * parent
Definition: h264dec.h:177
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1920
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:283
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:191
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:357
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
PPS
Picture parameter set.
Definition: h264_ps.h:110
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:562
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1078
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:78
H264Picture::mb_height
int mb_height
Definition: h264dec.h:160
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
H264SliceContext::qscale
int qscale
Definition: h264dec.h:190
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:788
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2289
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:66
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:670
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:754
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:290
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:284
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:84
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:240
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:923
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:217
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:767
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:236
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:38
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:69
H264SEIPictureTiming
Definition: h264_sei.h:54
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:320
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:247
AVFrame::crop_left
size_t crop_left
Definition: frame.h:768
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:220
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:491
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
H264Picture::reference
int reference
Definition: h264dec.h:152
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:68
height
#define height
Definition: dsp.h:85
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:231
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:29
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:483
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:200
AV_FRAME_DATA_LCEVC
@ AV_FRAME_DATA_LCEVC
Raw LCEVC payload data, as a uint8_t array, with NAL emulation bytes intact.
Definition: frame.h:236
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:116
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:127
H264Picture::decode_error_flags
atomic_int * decode_error_flags
RefStruct reference; its pointee is shared between decoding threads.
Definition: h264dec.h:164
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:665
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:36
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:153
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:212
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:192
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2123
H264Picture::gray
int gray
Definition: h264dec.h:166
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:485
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:287
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1605
H264SliceContext::cbp
int cbp
Definition: h264dec.h:258
gray
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted then it is pulled from the input slice through the input converter and horizontal scaler The result is also stored in the ring buffer to serve future vertical scaler requests When no more output can be generated because lines from a future slice would be then all remaining lines in the current slice are horizontally scaled and put in the ring buffer[This is done for luma and chroma, each with possibly different numbers of lines per picture.] Input to YUV Converter When the input to the main path is not planar bits per component YUV or bit gray
Definition: swscale.txt:52
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:476
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:219
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:120
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:233
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:79
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2557
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:326
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:229
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:187
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:331
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture *const *from, int count, H264Context *new_base, const H264Context *old_base)
Definition: h264_slice.c:294
av_zero_extend
#define av_zero_extend
Definition: common.h:151
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:66
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:79
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:32
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:187
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:40
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:52
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2431
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:245
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:59
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:398
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:145
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:330
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:237
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:61
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:156
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:38
H264Context
H264Context.
Definition: h264dec.h:340
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:219
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:139
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:636
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:39
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:328
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2769
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:230
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
ff_h264_replace_picture
int ff_h264_replace_picture(H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:135
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:222
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:186
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1057
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:648
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:669
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:477
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:271
avcodec.h
H264SliceContext::h264
const struct H264Context * h264
Definition: h264dec.h:181
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:280
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:533
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1395
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:193
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
ff_refstruct_replace
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:482
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:487
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:281
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:229
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:198
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AVFrame::height
int height
Definition: frame.h:461
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:294
H264Picture::motion_val_base
int16_t(*[2] motion_val_base)[2]
RefStruct reference.
Definition: h264dec.h:123
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1623
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:285
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:134
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
H264SliceContext::mmco
MMCO mmco[H264_MAX_MMCO_COUNT]
Definition: h264dec.h:324
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
H264Picture::mb_width
int mb_width
Definition: h264dec.h:160
ff_h264_unref_picture
void ff_h264_unref_picture(H264Picture *pic)
Definition: h264_picture.c:39
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
H264Picture
Definition: h264dec.h:114
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:67
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
pps
uint64_t pps
Definition: dovi_rpuenc.c:35
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1913
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:189
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:162
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:272
LBOT
#define LBOT
Definition: h264dec.h:70
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:296
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:79
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:74
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:458
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
H264SliceContext::er
ERContext * er
Definition: h264dec.h:183
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:34
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: h264dec.h:130
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:329
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, const int left_xy[LEFT_MBS], int top_type, const int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2205
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:141
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:248
AVFrame::crop_top
size_t crop_top
Definition: frame.h:766
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:182
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:209
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:69
h264.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:434
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:289
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:289
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:92
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2187
h
h
Definition: vp9dsp_template.c:2070
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:322
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:488
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:74
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:49
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:288
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
width
#define width
Definition: dsp.h:85
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:217
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:188
H264Ref::poc
int poc
Definition: h264dec.h:174
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:95
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:33
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:332
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3320
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:141
H264Ref::reference
int reference
Definition: h264dec.h:173
src
#define src
Definition: vp8dsp.c:248
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:124
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:420
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:486
ff_refstruct_pool_get
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:37
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:244
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, const H264Picture *src)
Definition: h264_picture.c:166
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:457