FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "cpu.h"
24 #include "dict.h"
25 #include "frame.h"
26 #include "imgutils.h"
27 #include "mem.h"
28 #include "samplefmt.h"
29 #include "hwcontext.h"
30 
31 static const AVSideDataDescriptor sd_props[] = {
32  [AV_FRAME_DATA_PANSCAN] = { "AVPanScan" },
33  [AV_FRAME_DATA_A53_CC] = { "ATSC A53 Part 4 Closed Captions" },
34  [AV_FRAME_DATA_MATRIXENCODING] = { "AVMatrixEncoding", AV_SIDE_DATA_PROP_CHANNEL_DEPENDENT },
35  [AV_FRAME_DATA_DOWNMIX_INFO] = { "Metadata relevant to a downmix procedure", AV_SIDE_DATA_PROP_CHANNEL_DEPENDENT },
36  [AV_FRAME_DATA_AFD] = { "Active format description" },
37  [AV_FRAME_DATA_MOTION_VECTORS] = { "Motion vectors", AV_SIDE_DATA_PROP_SIZE_DEPENDENT },
38  [AV_FRAME_DATA_SKIP_SAMPLES] = { "Skip samples" },
39  [AV_FRAME_DATA_GOP_TIMECODE] = { "GOP timecode" },
40  [AV_FRAME_DATA_S12M_TIMECODE] = { "SMPTE 12-1 timecode" },
41  [AV_FRAME_DATA_DYNAMIC_HDR_PLUS] = { "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)", AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
42  [AV_FRAME_DATA_DYNAMIC_HDR_VIVID] = { "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)", AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
43  [AV_FRAME_DATA_REGIONS_OF_INTEREST] = { "Regions Of Interest", AV_SIDE_DATA_PROP_SIZE_DEPENDENT },
44  [AV_FRAME_DATA_VIDEO_ENC_PARAMS] = { "Video encoding parameters" },
45  [AV_FRAME_DATA_FILM_GRAIN_PARAMS] = { "Film grain parameters" },
46  [AV_FRAME_DATA_DETECTION_BBOXES] = { "Bounding boxes for object detection and classification", AV_SIDE_DATA_PROP_SIZE_DEPENDENT },
47  [AV_FRAME_DATA_DOVI_RPU_BUFFER] = { "Dolby Vision RPU Data", AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
48  [AV_FRAME_DATA_DOVI_METADATA] = { "Dolby Vision Metadata", AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
49  [AV_FRAME_DATA_LCEVC] = { "LCEVC NAL data", AV_SIDE_DATA_PROP_SIZE_DEPENDENT },
50  [AV_FRAME_DATA_VIEW_ID] = { "View ID" },
51  [AV_FRAME_DATA_STEREO3D] = { "Stereo 3D", AV_SIDE_DATA_PROP_GLOBAL },
52  [AV_FRAME_DATA_REPLAYGAIN] = { "AVReplayGain", AV_SIDE_DATA_PROP_GLOBAL },
53  [AV_FRAME_DATA_DISPLAYMATRIX] = { "3x3 displaymatrix", AV_SIDE_DATA_PROP_GLOBAL },
54  [AV_FRAME_DATA_AUDIO_SERVICE_TYPE] = { "Audio service type", AV_SIDE_DATA_PROP_GLOBAL },
55  [AV_FRAME_DATA_MASTERING_DISPLAY_METADATA] = { "Mastering display metadata", AV_SIDE_DATA_PROP_GLOBAL | AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
56  [AV_FRAME_DATA_CONTENT_LIGHT_LEVEL] = { "Content light level metadata", AV_SIDE_DATA_PROP_GLOBAL | AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
57  [AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT] = { "Ambient viewing environment", AV_SIDE_DATA_PROP_GLOBAL },
58  [AV_FRAME_DATA_SPHERICAL] = { "Spherical Mapping", AV_SIDE_DATA_PROP_GLOBAL | AV_SIDE_DATA_PROP_SIZE_DEPENDENT },
59  [AV_FRAME_DATA_ICC_PROFILE] = { "ICC profile", AV_SIDE_DATA_PROP_GLOBAL | AV_SIDE_DATA_PROP_COLOR_DEPENDENT },
60  [AV_FRAME_DATA_SEI_UNREGISTERED] = { "H.26[45] User Data Unregistered SEI message", AV_SIDE_DATA_PROP_MULTI },
61  [AV_FRAME_DATA_VIDEO_HINT] = { "Encoding video hint", AV_SIDE_DATA_PROP_SIZE_DEPENDENT },
62 };
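The table above is indexed by AVFrameSideDataType and is what av_frame_side_data_desc() and av_frame_side_data_name(), defined near the end of this file, look entries up in. A minimal lookup sketch (not part of frame.c, purely illustrative):

    #include <stdio.h>
    #include <libavutil/frame.h>

    int main(void)
    {
        /* Expected to print "3x3 displaymatrix" on builds where the descriptor exists. */
        const char *name = av_frame_side_data_name(AV_FRAME_DATA_DISPLAYMATRIX);
        printf("%s\n", name ? name : "(unknown)");
        return 0;
    }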
63 
64 static void get_frame_defaults(AVFrame *frame)
65 {
66  memset(frame, 0, sizeof(*frame));
67 
68  frame->pts =
69  frame->pkt_dts = AV_NOPTS_VALUE;
70  frame->best_effort_timestamp = AV_NOPTS_VALUE;
71  frame->duration = 0;
72 #if FF_API_FRAME_PKT
73 FF_DISABLE_DEPRECATION_WARNINGS
74  frame->pkt_pos = -1;
75  frame->pkt_size = -1;
76 FF_ENABLE_DEPRECATION_WARNINGS
77 #endif
78  frame->time_base = (AVRational){ 0, 1 };
79  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
80  frame->format = -1; /* unknown */
81  frame->extended_data = frame->data;
82  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
83  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
84  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
85  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
86  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
87  frame->flags = 0;
88 }
89 
90 static void free_side_data(AVFrameSideData **ptr_sd)
91 {
92  AVFrameSideData *sd = *ptr_sd;
93 
94  av_buffer_unref(&sd->buf);
95  av_dict_free(&sd->metadata);
96  av_freep(ptr_sd);
97 }
98 
99 static void wipe_side_data(AVFrameSideData ***sd, int *nb_side_data)
100 {
101  for (int i = 0; i < *nb_side_data; i++) {
102  free_side_data(&((*sd)[i]));
103  }
104  *nb_side_data = 0;
105 
106  av_freep(sd);
107 }
108 
109 static void frame_side_data_wipe(AVFrame *frame)
110 {
111  wipe_side_data(&frame->side_data, &frame->nb_side_data);
112 }
113 
114 void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
115 {
116  wipe_side_data(sd, nb_sd);
117 }
118 
119 static void remove_side_data(AVFrameSideData ***sd, int *nb_side_data,
120  const enum AVFrameSideDataType type)
121 {
122  for (int i = *nb_side_data - 1; i >= 0; i--) {
123  AVFrameSideData *entry = ((*sd)[i]);
124  if (entry->type != type)
125  continue;
126 
127  free_side_data(&entry);
128 
129  ((*sd)[i]) = ((*sd)[*nb_side_data - 1]);
130  (*nb_side_data)--;
131  }
132 }
133 
134 static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd,
135  const AVFrameSideData *target)
136 {
137  for (int i = *nb_sd - 1; i >= 0; i--) {
138  AVFrameSideData *entry = ((*sd)[i]);
139  if (entry != target)
140  continue;
141 
142  free_side_data(&entry);
143 
144  ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
145  (*nb_sd)--;
146 
147  return;
148  }
149 }
150 
151 AVFrame *av_frame_alloc(void)
152 {
153  AVFrame *frame = av_malloc(sizeof(*frame));
154 
155  if (!frame)
156  return NULL;
157 
158  get_frame_defaults(frame);
159 
160  return frame;
161 }
162 
163 void av_frame_free(AVFrame **frame)
164 {
165  if (!frame || !*frame)
166  return;
167 
168  av_frame_unref(*frame);
169  av_freep(frame);
170 }
171 
172 #define ALIGN (HAVE_SIMD_ALIGN_64 ? 64 : 32)
173 
174 static int get_video_buffer(AVFrame *frame, int align)
175 {
176  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
177  int ret, padded_height;
178  int plane_padding;
179  ptrdiff_t linesizes[4];
180  size_t total_size, sizes[4];
181 
182  if (!desc)
183  return AVERROR(EINVAL);
184 
185  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
186  return ret;
187 
188  if (align <= 0)
189  align = ALIGN;
190  plane_padding = FFMAX(ALIGN, align);
191 
192  if (!frame->linesize[0]) {
193  for (int i = 1; i <= align; i += i) {
194  ret = av_image_fill_linesizes(frame->linesize, frame->format,
195  FFALIGN(frame->width, i));
196  if (ret < 0)
197  return ret;
198  if (!(frame->linesize[0] & (align-1)))
199  break;
200  }
201 
202  for (int i = 0; i < 4 && frame->linesize[i]; i++)
203  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
204  }
205 
206  for (int i = 0; i < 4; i++)
207  linesizes[i] = frame->linesize[i];
208 
209  padded_height = FFALIGN(frame->height, 32);
210  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
211  padded_height, linesizes)) < 0)
212  return ret;
213 
214  total_size = 4 * plane_padding + 4 * align;
215  for (int i = 0; i < 4; i++) {
216  if (sizes[i] > SIZE_MAX - total_size)
217  return AVERROR(EINVAL);
218  total_size += sizes[i];
219  }
220 
221  frame->buf[0] = av_buffer_alloc(total_size);
222  if (!frame->buf[0]) {
223  ret = AVERROR(ENOMEM);
224  goto fail;
225  }
226 
227  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
228  frame->buf[0]->data, frame->linesize)) < 0)
229  goto fail;
230 
231  for (int i = 1; i < 4; i++) {
232  if (frame->data[i])
233  frame->data[i] += i * plane_padding;
234  frame->data[i] = (uint8_t *)FFALIGN((uintptr_t)frame->data[i], align);
235  }
236 
237  frame->extended_data = frame->data;
238 
239  return 0;
240 fail:
241  av_frame_unref(frame);
242  return ret;
243 }
244 
245 static int get_audio_buffer(AVFrame *frame, int align)
246 {
247  int planar = av_sample_fmt_is_planar(frame->format);
248  int channels, planes;
249  size_t size;
250  int ret;
251 
252  channels = frame->ch_layout.nb_channels;
253  planes = planar ? channels : 1;
254  if (!frame->linesize[0]) {
255  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
256  frame->nb_samples, frame->format,
257  align);
258  if (ret < 0)
259  return ret;
260  }
261 
262  if (align <= 0)
263  align = ALIGN;
264 
265  if (planes > AV_NUM_DATA_POINTERS) {
266  frame->extended_data = av_calloc(planes,
267  sizeof(*frame->extended_data));
268  frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
269  sizeof(*frame->extended_buf));
270  if (!frame->extended_data || !frame->extended_buf) {
271  av_freep(&frame->extended_data);
272  av_freep(&frame->extended_buf);
273  return AVERROR(ENOMEM);
274  }
275  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
276  } else
277  frame->extended_data = frame->data;
278 
279  if (frame->linesize[0] > SIZE_MAX - align)
280  return AVERROR(EINVAL);
281  size = frame->linesize[0] + (size_t)align;
282 
283  for (int i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
284  frame->buf[i] = av_buffer_alloc(size);
285  if (!frame->buf[i]) {
286  av_frame_unref(frame);
287  return AVERROR(ENOMEM);
288  }
289  frame->extended_data[i] = frame->data[i] =
290  (uint8_t *)FFALIGN((uintptr_t)frame->buf[i]->data, align);
291  }
292  for (int i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
293  frame->extended_buf[i] = av_buffer_alloc(size);
294  if (!frame->extended_buf[i]) {
295  av_frame_unref(frame);
296  return AVERROR(ENOMEM);
297  }
298  frame->extended_data[i + AV_NUM_DATA_POINTERS] =
299  (uint8_t *)FFALIGN((uintptr_t)frame->extended_buf[i]->data, align);
300  }
301  return 0;
302 
303 }
304 
305 int av_frame_get_buffer(AVFrame *frame, int align)
306 {
307  if (frame->format < 0)
308  return AVERROR(EINVAL);
309 
310  if (frame->width > 0 && frame->height > 0)
311  return get_video_buffer(frame, align);
312  else if (frame->nb_samples > 0 &&
313  (av_channel_layout_check(&frame->ch_layout)))
314  return get_audio_buffer(frame, align);
315 
316  return AVERROR(EINVAL);
317 }
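Typical caller-side use of av_frame_get_buffer() for video; a minimal sketch, with the pixel format and dimensions chosen arbitrarily for illustration. For audio the caller would instead set format, nb_samples and ch_layout before the same call.

    #include <libavutil/frame.h>

    /* Allocate a refcounted 1280x720 YUV420P frame (example parameters). */
    static AVFrame *alloc_video_frame(void)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->format = AV_PIX_FMT_YUV420P;
        frame->width  = 1280;
        frame->height = 720;
        if (av_frame_get_buffer(frame, 0) < 0)  /* align = 0: let libavutil pick ALIGN */
            av_frame_free(&frame);
        return frame;
    }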
318 
319 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
320 {
321  int ret;
322 
323 #if FF_API_FRAME_KEY
324 FF_DISABLE_DEPRECATION_WARNINGS
325  dst->key_frame = src->key_frame;
326 FF_ENABLE_DEPRECATION_WARNINGS
327 #endif
328  dst->pict_type = src->pict_type;
329  dst->sample_aspect_ratio = src->sample_aspect_ratio;
330  dst->crop_top = src->crop_top;
331  dst->crop_bottom = src->crop_bottom;
332  dst->crop_left = src->crop_left;
333  dst->crop_right = src->crop_right;
334  dst->pts = src->pts;
335  dst->duration = src->duration;
336  dst->repeat_pict = src->repeat_pict;
337 #if FF_API_INTERLACED_FRAME
338 FF_DISABLE_DEPRECATION_WARNINGS
339  dst->interlaced_frame = src->interlaced_frame;
340  dst->top_field_first = src->top_field_first;
341 FF_ENABLE_DEPRECATION_WARNINGS
342 #endif
343 #if FF_API_PALETTE_HAS_CHANGED
344 FF_DISABLE_DEPRECATION_WARNINGS
345  dst->palette_has_changed = src->palette_has_changed;
346 FF_ENABLE_DEPRECATION_WARNINGS
347 #endif
348  dst->sample_rate = src->sample_rate;
349  dst->opaque = src->opaque;
350  dst->pkt_dts = src->pkt_dts;
351 #if FF_API_FRAME_PKT
352 FF_DISABLE_DEPRECATION_WARNINGS
353  dst->pkt_pos = src->pkt_pos;
354  dst->pkt_size = src->pkt_size;
355 FF_ENABLE_DEPRECATION_WARNINGS
356 #endif
357  dst->time_base = src->time_base;
358  dst->quality = src->quality;
359  dst->best_effort_timestamp = src->best_effort_timestamp;
360  dst->flags = src->flags;
361  dst->decode_error_flags = src->decode_error_flags;
362  dst->color_primaries = src->color_primaries;
363  dst->color_trc = src->color_trc;
364  dst->colorspace = src->colorspace;
365  dst->color_range = src->color_range;
366  dst->chroma_location = src->chroma_location;
367 
368  av_dict_copy(&dst->metadata, src->metadata, 0);
369 
370  for (int i = 0; i < src->nb_side_data; i++) {
371  const AVFrameSideData *sd_src = src->side_data[i];
372  AVFrameSideData *sd_dst;
373  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
374  && (src->width != dst->width || src->height != dst->height))
375  continue;
376  if (force_copy) {
377  sd_dst = av_frame_new_side_data(dst, sd_src->type,
378  sd_src->size);
379  if (!sd_dst) {
380  frame_side_data_wipe(dst);
381  return AVERROR(ENOMEM);
382  }
383  memcpy(sd_dst->data, sd_src->data, sd_src->size);
384  } else {
385  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
386  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
387  if (!sd_dst) {
388  av_buffer_unref(&ref);
389  frame_side_data_wipe(dst);
390  return AVERROR(ENOMEM);
391  }
392  }
393  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
394  }
395 
396  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
397  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
398  return ret;
399 }
400 
401 int av_frame_ref(AVFrame *dst, const AVFrame *src)
402 {
403  int ret = 0;
404 
405  av_assert1(dst->width == 0 && dst->height == 0);
406  av_assert1(dst->ch_layout.nb_channels == 0 &&
407  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
408 
409  dst->format = src->format;
410  dst->width = src->width;
411  dst->height = src->height;
412  dst->nb_samples = src->nb_samples;
413 
414  ret = frame_copy_props(dst, src, 0);
415  if (ret < 0)
416  goto fail;
417 
418  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
419  if (ret < 0)
420  goto fail;
421 
422  /* duplicate the frame data if it's not refcounted */
423  if (!src->buf[0]) {
424  ret = av_frame_get_buffer(dst, 0);
425  if (ret < 0)
426  goto fail;
427 
428  ret = av_frame_copy(dst, src);
429  if (ret < 0)
430  goto fail;
431 
432  return 0;
433  }
434 
435  /* ref the buffers */
436  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
437  if (!src->buf[i])
438  continue;
439  dst->buf[i] = av_buffer_ref(src->buf[i]);
440  if (!dst->buf[i]) {
441  ret = AVERROR(ENOMEM);
442  goto fail;
443  }
444  }
445 
446  if (src->extended_buf) {
447  dst->extended_buf = av_calloc(src->nb_extended_buf,
448  sizeof(*dst->extended_buf));
449  if (!dst->extended_buf) {
450  ret = AVERROR(ENOMEM);
451  goto fail;
452  }
453  dst->nb_extended_buf = src->nb_extended_buf;
454 
455  for (int i = 0; i < src->nb_extended_buf; i++) {
456  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
457  if (!dst->extended_buf[i]) {
458  ret = AVERROR(ENOMEM);
459  goto fail;
460  }
461  }
462  }
463 
464  if (src->hw_frames_ctx) {
465  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
466  if (!dst->hw_frames_ctx) {
467  ret = AVERROR(ENOMEM);
468  goto fail;
469  }
470  }
471 
472  /* duplicate extended data */
473  if (src->extended_data != src->data) {
474  int ch = dst->ch_layout.nb_channels;
475 
476  if (!ch) {
477  ret = AVERROR(EINVAL);
478  goto fail;
479  }
480 
481  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
482  if (!dst->extended_data) {
483  ret = AVERROR(ENOMEM);
484  goto fail;
485  }
486  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
487  } else
488  dst->extended_data = dst->data;
489 
490  memcpy(dst->data, src->data, sizeof(src->data));
491  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
492 
493  return 0;
494 
495 fail:
496  av_frame_unref(dst);
497  return ret;
498 }
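av_frame_ref() copies properties and takes new references on the underlying buffers rather than duplicating frame data (unless the source is not refcounted). A small sketch of the usual clone-by-hand pattern, essentially what av_frame_clone() below wraps:

    #include <libavutil/frame.h>

    /* Shallow copy: the returned frame references the same buffers as src. */
    static AVFrame *ref_frame(const AVFrame *src)
    {
        AVFrame *copy = av_frame_alloc();
        if (!copy)
            return NULL;
        if (av_frame_ref(copy, src) < 0)
            av_frame_free(&copy);
        return copy;
    }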
499 
500 int av_frame_replace(AVFrame *dst, const AVFrame *src)
501 {
502  int ret = 0;
503 
504  if (dst == src)
505  return AVERROR(EINVAL);
506 
507  if (!src->buf[0]) {
508  av_frame_unref(dst);
509 
510  /* duplicate the frame data if it's not refcounted */
511  if ( src->data[0] || src->data[1]
512  || src->data[2] || src->data[3])
513  return av_frame_ref(dst, src);
514 
515  ret = frame_copy_props(dst, src, 0);
516  if (ret < 0)
517  goto fail;
518  }
519 
520  dst->format = src->format;
521  dst->width = src->width;
522  dst->height = src->height;
523  dst->nb_samples = src->nb_samples;
524 
525  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
526  if (ret < 0)
527  goto fail;
528 
529  frame_side_data_wipe(dst);
530  av_dict_free(&dst->metadata);
531  ret = frame_copy_props(dst, src, 0);
532  if (ret < 0)
533  goto fail;
534 
535  /* replace the buffers */
536  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
537  ret = av_buffer_replace(&dst->buf[i], src->buf[i]);
538  if (ret < 0)
539  goto fail;
540  }
541 
542  if (src->extended_buf) {
543  if (dst->nb_extended_buf != src->nb_extended_buf) {
544  int nb_extended_buf = FFMIN(dst->nb_extended_buf, src->nb_extended_buf);
545  void *tmp;
546 
547  for (int i = nb_extended_buf; i < dst->nb_extended_buf; i++)
548  av_buffer_unref(&dst->extended_buf[i]);
549 
550  tmp = av_realloc_array(dst->extended_buf, sizeof(*dst->extended_buf),
551  src->nb_extended_buf);
552  if (!tmp) {
553  ret = AVERROR(ENOMEM);
554  goto fail;
555  }
556  dst->extended_buf = tmp;
557  dst->nb_extended_buf = src->nb_extended_buf;
558 
559  memset(&dst->extended_buf[nb_extended_buf], 0,
560  (src->nb_extended_buf - nb_extended_buf) * sizeof(*dst->extended_buf));
561  }
562 
563  for (int i = 0; i < src->nb_extended_buf; i++) {
564  ret = av_buffer_replace(&dst->extended_buf[i], src->extended_buf[i]);
565  if (ret < 0)
566  goto fail;
567  }
568  } else if (dst->extended_buf) {
569  for (int i = 0; i < dst->nb_extended_buf; i++)
570  av_buffer_unref(&dst->extended_buf[i]);
571  av_freep(&dst->extended_buf);
572  }
573 
574  ret = av_buffer_replace(&dst->hw_frames_ctx, src->hw_frames_ctx);
575  if (ret < 0)
576  goto fail;
577 
578  if (dst->extended_data != dst->data)
579  av_freep(&dst->extended_data);
580 
581  if (src->extended_data != src->data) {
582  int ch = dst->ch_layout.nb_channels;
583 
584  if (!ch) {
585  ret = AVERROR(EINVAL);
586  goto fail;
587  }
588 
589  if (ch > SIZE_MAX / sizeof(*dst->extended_data))
590  goto fail;
591 
592  dst->extended_data = av_memdup(src->extended_data, sizeof(*dst->extended_data) * ch);
593  if (!dst->extended_data) {
594  ret = AVERROR(ENOMEM);
595  goto fail;
596  }
597  } else
598  dst->extended_data = dst->data;
599 
600  memcpy(dst->data, src->data, sizeof(src->data));
601  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
602 
603  return 0;
604 
605 fail:
606  av_frame_unref(dst);
607  return ret;
608 }
609 
610 AVFrame *av_frame_clone(const AVFrame *src)
611 {
612  AVFrame *ret = av_frame_alloc();
613 
614  if (!ret)
615  return NULL;
616 
617  if (av_frame_ref(ret, src) < 0)
618  av_frame_free(&ret);
619 
620  return ret;
621 }
622 
623 void av_frame_unref(AVFrame *frame)
624 {
625  if (!frame)
626  return;
627 
628  frame_side_data_wipe(frame);
629 
630  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
631  av_buffer_unref(&frame->buf[i]);
632  for (int i = 0; i < frame->nb_extended_buf; i++)
633  av_buffer_unref(&frame->extended_buf[i]);
634  av_freep(&frame->extended_buf);
635  av_dict_free(&frame->metadata);
636 
637  av_buffer_unref(&frame->hw_frames_ctx);
638 
639  av_buffer_unref(&frame->opaque_ref);
640  av_buffer_unref(&frame->private_ref);
641 
642  if (frame->extended_data != frame->data)
643  av_freep(&frame->extended_data);
644 
645  av_channel_layout_uninit(&frame->ch_layout);
646 
647  get_frame_defaults(frame);
648 }
649 
650 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
651 {
652  av_assert1(dst->width == 0 && dst->height == 0);
653  av_assert1(dst->ch_layout.nb_channels == 0 &&
654  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
655 
656  *dst = *src;
657  if (src->extended_data == src->data)
658  dst->extended_data = dst->data;
659  get_frame_defaults(src);
660 }
661 
662 int av_frame_is_writable(AVFrame *frame)
663 {
664  int ret = 1;
665 
666  /* assume non-refcounted frames are not writable */
667  if (!frame->buf[0])
668  return 0;
669 
670  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
671  if (frame->buf[i])
672  ret &= !!av_buffer_is_writable(frame->buf[i]);
673  for (int i = 0; i < frame->nb_extended_buf; i++)
674  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
675 
676  return ret;
677 }
678 
679 int av_frame_make_writable(AVFrame *frame)
680 {
681  AVFrame tmp;
682  int ret;
683 
684  if (av_frame_is_writable(frame))
685  return 0;
686 
687  memset(&tmp, 0, sizeof(tmp));
688  tmp.format = frame->format;
689  tmp.width = frame->width;
690  tmp.height = frame->height;
691  tmp.nb_samples = frame->nb_samples;
692  ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
693  if (ret < 0) {
694  av_frame_unref(&tmp);
695  return ret;
696  }
697 
698  if (frame->hw_frames_ctx)
699  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
700  else
701  ret = av_frame_get_buffer(&tmp, 0);
702  if (ret < 0)
703  return ret;
704 
705  ret = av_frame_copy(&tmp, frame);
706  if (ret < 0) {
707  av_frame_unref(&tmp);
708  return ret;
709  }
710 
711  ret = av_frame_copy_props(&tmp, frame);
712  if (ret < 0) {
713  av_frame_unref(&tmp);
714  return ret;
715  }
716 
717  av_frame_unref(frame);
718 
719  *frame = tmp;
720  if (tmp.data == tmp.extended_data)
721  frame->extended_data = frame->data;
722 
723  return 0;
724 }
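av_frame_make_writable() gives copy-on-write semantics on top of the refcount checks in av_frame_is_writable(): if any buffer is shared, the data is reallocated and copied before the caller touches it. An illustrative sketch:

    #include <libavutil/frame.h>

    /* Modify a frame in place, copying its buffers first if they are shared. */
    static int poke_frame(AVFrame *frame)
    {
        int ret = av_frame_make_writable(frame);
        if (ret < 0)
            return ret;
        frame->data[0][0] = 0;  /* no longer visible through other references */
        return 0;
    }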
725 
726 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
727 {
728  return frame_copy_props(dst, src, 1);
729 }
730 
731 AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane)
732 {
733  uint8_t *data;
734  int planes;
735 
736  if (frame->nb_samples) {
737  int channels = frame->ch_layout.nb_channels;
738  if (!channels)
739  return NULL;
740  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
741  } else
742  planes = 4;
743 
744  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
745  return NULL;
746  data = frame->extended_data[plane];
747 
748  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
749  AVBufferRef *buf = frame->buf[i];
750  if (data >= buf->data && data < buf->data + buf->size)
751  return buf;
752  }
753  for (int i = 0; i < frame->nb_extended_buf; i++) {
754  AVBufferRef *buf = frame->extended_buf[i];
755  if (data >= buf->data && data < buf->data + buf->size)
756  return buf;
757  }
758  return NULL;
759 }
760 
761 static AVFrameSideData *add_side_data_from_buf_ext(AVFrameSideData ***sd,
762  int *nb_sd,
763  enum AVFrameSideDataType type,
764  AVBufferRef *buf, uint8_t *data,
765  size_t size)
766 {
767  AVFrameSideData *ret, **tmp;
768 
769  // *nb_sd + 1 needs to fit into an int and a size_t.
770  if ((unsigned)*nb_sd >= FFMIN(INT_MAX, SIZE_MAX))
771  return NULL;
772 
773  tmp = av_realloc_array(*sd, sizeof(**sd), *nb_sd + 1);
774  if (!tmp)
775  return NULL;
776  *sd = tmp;
777 
778  ret = av_mallocz(sizeof(*ret));
779  if (!ret)
780  return NULL;
781 
782  ret->buf = buf;
783  ret->data = data;
784  ret->size = size;
785  ret->type = type;
786 
787  (*sd)[(*nb_sd)++] = ret;
788 
789  return ret;
790 }
791 
792 static AVFrameSideData *add_side_data_from_buf(AVFrameSideData ***sd,
793  int *nb_sd,
794  enum AVFrameSideDataType type,
795  AVBufferRef *buf)
796 {
797  if (!buf)
798  return NULL;
799 
800  return add_side_data_from_buf_ext(sd, nb_sd, type, buf, buf->data, buf->size);
801 }
802 
803 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
804  enum AVFrameSideDataType type,
805  AVBufferRef *buf)
806 {
807  return
808  add_side_data_from_buf(
809  &frame->side_data, &frame->nb_side_data, type, buf);
810 }
811 
812 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
813  enum AVFrameSideDataType type,
814  size_t size)
815 {
816  AVFrameSideData *ret;
817  AVBufferRef *buf = av_buffer_alloc(size);
818  ret = av_frame_new_side_data_from_buf(frame, type, buf);
819  if (!ret)
820  av_buffer_unref(&buf);
821  return ret;
822 }
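A sketch of attaching side data through av_frame_new_side_data(); the display-matrix payload and the use of av_display_rotation_set() from libavutil/display.h are illustrative choices, and the 90-degree angle is arbitrary:

    #include <libavutil/display.h>
    #include <libavutil/frame.h>

    /* Attach a 3x3 display matrix describing a 90-degree rotation. */
    static int add_rotation(AVFrame *frame)
    {
        AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                                     sizeof(int32_t) * 9);
        if (!sd)
            return AVERROR(ENOMEM);
        av_display_rotation_set((int32_t *)sd->data, 90.0);
        return 0;
    }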
823 
824 static AVFrameSideData *replace_side_data_from_buf(AVFrameSideData *dst,
825  AVBufferRef *buf, int flags)
826 {
827  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
828  return NULL;
829 
830  av_dict_free(&dst->metadata);
831  av_buffer_unref(&dst->buf);
832  dst->buf = buf;
833  dst->data = buf->data;
834  dst->size = buf->size;
835  return dst;
836 }
837 
838 AVFrameSideData *av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd,
839  enum AVFrameSideDataType type,
840  size_t size, unsigned int flags)
841 {
842  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
843  AVBufferRef *buf = av_buffer_alloc(size);
844  AVFrameSideData *ret = NULL;
845 
846  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
847  remove_side_data(sd, nb_sd, type);
848  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
849  (ret = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
850  ret = replace_side_data_from_buf(ret, buf, flags);
851  if (!ret)
852  av_buffer_unref(&buf);
853  return ret;
854  }
855 
856  ret = add_side_data_from_buf(sd, nb_sd, type, buf);
857  if (!ret)
858  av_buffer_unref(&buf);
859 
860  return ret;
861 }
862 
863 AVFrameSideData *av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd,
864  enum AVFrameSideDataType type,
865  AVBufferRef **pbuf, unsigned int flags)
866 {
867  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
868  AVFrameSideData *sd_dst = NULL;
869  AVBufferRef *buf = *pbuf;
870 
871  if ((flags & AV_FRAME_SIDE_DATA_FLAG_NEW_REF) && !(buf = av_buffer_ref(*pbuf)))
872  return NULL;
873  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
874  remove_side_data(sd, nb_sd, type);
875  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
876  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
877  sd_dst = replace_side_data_from_buf(sd_dst, buf, flags);
878  } else
879  sd_dst = add_side_data_from_buf(sd, nb_sd, type, buf);
880 
881  if (sd_dst && !(flags & AV_FRAME_SIDE_DATA_FLAG_NEW_REF))
882  *pbuf = NULL;
883  else if (!sd_dst && (flags & AV_FRAME_SIDE_DATA_FLAG_NEW_REF))
884  av_buffer_unref(&buf);
885  return sd_dst;
886 }
887 
888 int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd,
889  const AVFrameSideData *src, unsigned int flags)
890 {
891  const AVSideDataDescriptor *desc;
892  AVBufferRef *buf = NULL;
893  AVFrameSideData *sd_dst = NULL;
894  int ret = AVERROR_BUG;
895 
896  if (!sd || !src || !nb_sd || (*nb_sd && !*sd))
897  return AVERROR(EINVAL);
898 
899  desc = av_frame_side_data_desc(src->type);
900  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
901  remove_side_data(sd, nb_sd, src->type);
902  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
903  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, src->type))) {
904  AVDictionary *dict = NULL;
905 
906  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
907  return AVERROR(EEXIST);
908 
909  ret = av_dict_copy(&dict, src->metadata, 0);
910  if (ret < 0)
911  return ret;
912 
913  ret = av_buffer_replace(&sd_dst->buf, src->buf);
914  if (ret < 0) {
915  av_dict_free(&dict);
916  return ret;
917  }
918 
919  av_dict_free(&sd_dst->metadata);
920  sd_dst->metadata = dict;
921  sd_dst->data = src->data;
922  sd_dst->size = src->size;
923  return 0;
924  }
925 
926  buf = av_buffer_ref(src->buf);
927  if (!buf)
928  return AVERROR(ENOMEM);
929 
930  sd_dst = add_side_data_from_buf_ext(sd, nb_sd, src->type, buf,
931  src->data, src->size);
932  if (!sd_dst) {
933  av_buffer_unref(&buf);
934  return AVERROR(ENOMEM);
935  }
936 
937  ret = av_dict_copy(&sd_dst->metadata, src->metadata, 0);
938  if (ret < 0) {
939  remove_side_data_by_entry(sd, nb_sd, sd_dst);
940  return ret;
941  }
942 
943  return 0;
944 }
945 
946 const AVFrameSideData *av_frame_side_data_get_c(const AVFrameSideData *const *sd,
947  const int nb_sd,
948  enum AVFrameSideDataType type)
949 {
950  for (int i = 0; i < nb_sd; i++) {
951  if (sd[i]->type == type)
952  return sd[i];
953  }
954  return NULL;
955 }
956 
957 void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd,
958  enum AVFrameSideDataType type)
959 {
960  remove_side_data(sd, nb_sd, type);
961 }
962 
963 void av_frame_side_data_remove_by_props(AVFrameSideData ***sd, int *nb_sd,
964  int props)
965 {
966  for (int i = *nb_sd - 1; i >= 0; i--) {
967  AVFrameSideData *entry = ((*sd)[i]);
968  const AVSideDataDescriptor *desc = av_frame_side_data_desc(entry->type);
969  if (!desc || !(desc->props & props))
970  continue;
971 
972  free_side_data(&entry);
973 
974  ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
975  (*nb_sd)--;
976  }
977 }
978 
979 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
980  enum AVFrameSideDataType type)
981 {
982  return (AVFrameSideData *)av_frame_side_data_get(
983  frame->side_data, frame->nb_side_data,
984  type
985  );
986 }
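Consumers retrieve side data with av_frame_get_side_data() (or the array-based av_frame_side_data_get()); a minimal sketch that forwards A53 closed-caption bytes to a caller-supplied callback:

    #include <libavutil/frame.h>

    /* Forward A53 closed-caption bytes, if present, to a consumer chosen by the caller. */
    static void forward_cc(const AVFrame *frame,
                           void (*consume)(const uint8_t *data, size_t size))
    {
        const AVFrameSideData *cc = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
        if (cc)
            consume(cc->data, cc->size);
    }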
987 
988 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
989 {
990  int planes;
991 
992  if (dst->width < src->width ||
993  dst->height < src->height)
994  return AVERROR(EINVAL);
995 
996  if (src->hw_frames_ctx || dst->hw_frames_ctx)
997  return av_hwframe_transfer_data(dst, src, 0);
998 
999  planes = av_pix_fmt_count_planes(dst->format);
1000  for (int i = 0; i < planes; i++)
1001  if (!dst->data[i] || !src->data[i])
1002  return AVERROR(EINVAL);
1003 
1004  av_image_copy2(dst->data, dst->linesize,
1005  src->data, src->linesize,
1006  dst->format, src->width, src->height);
1007 
1008  return 0;
1009 }
1010 
1011 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
1012 {
1013  int planar = av_sample_fmt_is_planar(dst->format);
1014  int channels = dst->ch_layout.nb_channels;
1015  int planes = planar ? channels : 1;
1016 
1017  if (dst->nb_samples != src->nb_samples ||
1018  av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
1019  return AVERROR(EINVAL);
1020 
1021  for (int i = 0; i < planes; i++)
1022  if (!dst->extended_data[i] || !src->extended_data[i])
1023  return AVERROR(EINVAL);
1024 
1025  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
1026  dst->nb_samples, channels, dst->format);
1027 
1028  return 0;
1029 }
1030 
1031 int av_frame_copy(AVFrame *dst, const AVFrame *src)
1032 {
1033  if (dst->format != src->format || dst->format < 0)
1034  return AVERROR(EINVAL);
1035 
1036  if (dst->width > 0 && dst->height > 0)
1037  return frame_copy_video(dst, src);
1038  else if (dst->nb_samples > 0 &&
1039  (av_channel_layout_check(&dst->ch_layout)))
1040  return frame_copy_audio(dst, src);
1041 
1042  return AVERROR(EINVAL);
1043 }
1044 
1045 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
1046 {
1047  remove_side_data(&frame->side_data, &frame->nb_side_data, type);
1048 }
1049 
1050 const AVSideDataDescriptor *av_frame_side_data_desc(enum AVFrameSideDataType type)
1051 {
1052  unsigned t = type;
1053  if (t < FF_ARRAY_ELEMS(sd_props) && sd_props[t].name)
1054  return &sd_props[t];
1055  return NULL;
1056 }
1057 
1058 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
1059 {
1060  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
1061  return desc ? desc->name : NULL;
1062 }
1063 
1064 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
1065  const AVPixFmtDescriptor *desc)
1066 {
1067  for (int i = 0; frame->data[i]; i++) {
1068  const AVComponentDescriptor *comp = NULL;
1069  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
1070  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
1071 
1072  if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
1073  offsets[i] = 0;
1074  break;
1075  }
1076 
1077  /* find any component descriptor for this plane */
1078  for (int j = 0; j < desc->nb_components; j++) {
1079  if (desc->comp[j].plane == i) {
1080  comp = &desc->comp[j];
1081  break;
1082  }
1083  }
1084  if (!comp)
1085  return AVERROR_BUG;
1086 
1087  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
1088  (frame->crop_left >> shift_x) * comp->step;
1089  }
1090 
1091  return 0;
1092 }
1093 
1094 int av_frame_apply_cropping(AVFrame *frame, int flags)
1095 {
1096  const AVPixFmtDescriptor *desc;
1097  size_t offsets[4];
1098 
1099  if (!(frame->width > 0 && frame->height > 0))
1100  return AVERROR(EINVAL);
1101 
1102  if (frame->crop_left >= INT_MAX - frame->crop_right ||
1103  frame->crop_top >= INT_MAX - frame->crop_bottom ||
1104  (frame->crop_left + frame->crop_right) >= frame->width ||
1105  (frame->crop_top + frame->crop_bottom) >= frame->height)
1106  return AVERROR(ERANGE);
1107 
1108  desc = av_pix_fmt_desc_get(frame->format);
1109  if (!desc)
1110  return AVERROR_BUG;
1111 
1112  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
1113  * formats cannot be easily handled here either (and corresponding decoders
1114  * should not export any cropping anyway), so do the same for those as well.
1115  * */
1116  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
1117  frame->width -= frame->crop_right;
1118  frame->height -= frame->crop_bottom;
1119  frame->crop_right = 0;
1120  frame->crop_bottom = 0;
1121  return 0;
1122  }
1123 
1124  /* calculate the offsets for each plane */
1125  calc_cropping_offsets(offsets, frame, desc);
1126 
1127  /* adjust the offsets to avoid breaking alignment */
1128  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
1129  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
1130  int min_log2_align = INT_MAX;
1131 
1132  for (int i = 0; frame->data[i]; i++) {
1133  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
1134  min_log2_align = FFMIN(log2_align, min_log2_align);
1135  }
1136 
1137  /* we assume, and it should always be true, that the data alignment is
1138  * related to the cropping alignment by a constant power-of-2 factor */
1139  if (log2_crop_align < min_log2_align)
1140  return AVERROR_BUG;
1141 
1142  if (min_log2_align < 5 && log2_crop_align != INT_MAX) {
1143  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
1144  calc_cropping_offsets(offsets, frame, desc);
1145  }
1146  }
1147 
1148  for (int i = 0; frame->data[i]; i++)
1149  frame->data[i] += offsets[i];
1150 
1151  frame->width -= (frame->crop_left + frame->crop_right);
1152  frame->height -= (frame->crop_top + frame->crop_bottom);
1153  frame->crop_left = 0;
1154  frame->crop_right = 0;
1155  frame->crop_top = 0;
1156  frame->crop_bottom = 0;
1157 
1158  return 0;
1159 }
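Decoders normally export cropping through the crop_* fields rather than returning pre-cropped pictures, so callers apply it afterwards; a hedged usage sketch:

    #include <libavutil/frame.h>

    /* Shrink a decoded frame to its visible area in place. */
    static int crop_to_visible(AVFrame *frame)
    {
        /* Pass AV_FRAME_CROP_UNALIGNED instead of 0 to crop to the exact pixel
         * at the cost of potentially unaligned data pointers. */
        return av_frame_apply_cropping(frame, 0);
    }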