101     for (int i = 0; i < *nb_side_data; i++) {
122     for (int i = *nb_side_data - 1; i >= 0; i--) {
129         ((*sd)[i]) = ((*sd)[*nb_side_data - 1]);
137     for (int i = *nb_sd - 1; i >= 0; i--) {
144         ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
172 #define ALIGN (HAVE_SIMD_ALIGN_64 ? 64 : 32)
177 int ret, padded_height;
179 ptrdiff_t linesizes[4];
180     size_t total_size, sizes[4];
192     if (!frame->linesize[0]) {
202         for (int i = 0; i < 4 && frame->linesize[i]; i++)
206     for (int i = 0; i < 4; i++)
207         linesizes[i] = frame->linesize[i];
211     if ((ret = av_image_fill_plane_sizes(sizes, frame->format, padded_height, linesizes)) < 0)
214     total_size = 4 * plane_padding + 4 * align;
215     for (int i = 0; i < 4; i++) {
216         if (sizes[i] > SIZE_MAX - total_size)
222     if (!frame->buf[0]) {
231     for (int i = 1; i < 4; i++) {
233         frame->data[i] += i * plane_padding;
254     if (!frame->linesize[0]) {
267                                          sizeof(*frame->extended_data));
269                                         sizeof(*frame->extended_buf));
270     if (!frame->extended_data || !frame->extended_buf) {
294         if (!frame->extended_buf[i]) {
307     if (frame->format < 0)
312     else if (frame->nb_samples > 0 && av_channel_layout_check(&frame->ch_layout))
325     dst->key_frame = src->key_frame;
328     dst->pict_type = src->pict_type;
329     dst->sample_aspect_ratio = src->sample_aspect_ratio;
330     dst->crop_top = src->crop_top;
331     dst->crop_bottom = src->crop_bottom;
332     dst->crop_left = src->crop_left;
333     dst->crop_right = src->crop_right;
335     dst->duration = src->duration;
336     dst->repeat_pict = src->repeat_pict;
337 #if FF_API_INTERLACED_FRAME
339     dst->interlaced_frame = src->interlaced_frame;
340     dst->top_field_first = src->top_field_first;
343 #if FF_API_PALETTE_HAS_CHANGED
345     dst->palette_has_changed = src->palette_has_changed;
348     dst->sample_rate = src->sample_rate;
349     dst->opaque = src->opaque;
350     dst->pkt_dts = src->pkt_dts;
353     dst->pkt_pos = src->pkt_pos;
354     dst->pkt_size = src->pkt_size;
357     dst->time_base = src->time_base;
358     dst->quality = src->quality;
359     dst->best_effort_timestamp = src->best_effort_timestamp;
361     dst->decode_error_flags = src->decode_error_flags;
362     dst->color_primaries = src->color_primaries;
363     dst->color_trc = src->color_trc;
364     dst->colorspace = src->colorspace;
365     dst->color_range = src->color_range;
366     dst->chroma_location = src->chroma_location;
370     for (int i = 0; i < src->nb_side_data; i++) {
374             && (src->width != dst->width || src->height != dst->height))
409     dst->format = src->format;
411     dst->height = src->height;
412     dst->nb_samples = src->nb_samples;
446     if (src->extended_buf) {
448                                        sizeof(*dst->extended_buf));
449         if (!dst->extended_buf) {
453         dst->nb_extended_buf = src->nb_extended_buf;
455         for (int i = 0; i < src->nb_extended_buf; i++) {
457             if (!dst->extended_buf[i]) {
464     if (src->hw_frames_ctx) {
466         if (!dst->hw_frames_ctx) {
473     if (src->extended_data != src->data) {
474         int ch = dst->ch_layout.nb_channels;
482         if (!dst->extended_data) {
486         memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
488         dst->extended_data = dst->data;
490     memcpy(dst->data,     src->data,     sizeof(src->data));
491     memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
511     if (src->data[0] || src->data[1]
512         || src->data[2] || src->data[3])
520     dst->format = src->format;
522     dst->height = src->height;
523     dst->nb_samples = src->nb_samples;
542     if (src->extended_buf) {
543         if (dst->nb_extended_buf != src->nb_extended_buf) {
544             int nb_extended_buf = FFMIN(dst->nb_extended_buf, src->nb_extended_buf);
547             for (int i = nb_extended_buf; i < dst->nb_extended_buf; i++)
551                                                src->nb_extended_buf);
557             dst->nb_extended_buf = src->nb_extended_buf;
559             memset(&dst->extended_buf[nb_extended_buf], 0,
560                    (src->nb_extended_buf - nb_extended_buf) * sizeof(*dst->extended_buf));
563         for (int i = 0; i < src->nb_extended_buf; i++) {
568     } else if (dst->extended_buf) {
569         for (int i = 0; i < dst->nb_extended_buf; i++)
578     if (dst->extended_data != dst->data)
581     if (src->extended_data != src->data) {
582         int ch = dst->ch_layout.nb_channels;
589         if (ch > SIZE_MAX / sizeof(*dst->extended_data))
593         if (!dst->extended_data) {
598         dst->extended_data = dst->data;
600     memcpy(dst->data,     src->data,     sizeof(src->data));
601     memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
632     for (int i = 0; i < frame->nb_extended_buf; i++)
657     if (src->extended_data == src->data)
658         dst->extended_data = dst->data;
673     for (int i = 0; i < frame->nb_extended_buf; i++)
687     memset(&tmp, 0, sizeof(tmp));
698     if (frame->hw_frames_ctx)
720     if (tmp.data == tmp.extended_data)
736     if (frame->nb_samples) {
744     if (plane < 0 || plane >= planes || !frame->extended_data[plane])
753     for (int i = 0; i < frame->nb_extended_buf; i++) {
770     if ((unsigned)*nb_sd >= FFMIN(INT_MAX, SIZE_MAX))
787     (*sd)[(*nb_sd)++] = ret;
896     if (!sd || !src || !nb_sd || (*nb_sd && !*sd))
950     for (int i = 0; i < nb_sd; i++) {
966     for (int i = *nb_sd - 1; i >= 0; i--) {
969         if (!desc || !(desc->props & props))
974         ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
992     if (dst->width  < src->width ||
993         dst->height < src->height)
996     if (src->hw_frames_ctx || dst->hw_frames_ctx)
1001        if (!dst->data[i] || !src->data[i])
1005                        src->data, src->linesize,
1017    if (dst->nb_samples != src->nb_samples ||
1022        if (!dst->extended_data[i] || !src->extended_data[i])
1033    if (dst->format != src->format || dst->format < 0)
1036    if (dst->width > 0 && dst->height > 0)
1038    else if (dst->nb_samples > 0 && av_channel_layout_check(&dst->ch_layout))
1067    for (int i = 0; frame->data[i]; i++) {
1069        int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
1070        int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
1078        for (int j = 0; j < desc->nb_components; j++) {
1079            if (desc->comp[j].plane == i) {
1088                          (frame->crop_left >> shift_x) * comp->step;
1099    if (!(frame->width > 0 && frame->height > 0))
1102    if (frame->crop_left >= INT_MAX - frame->crop_right ||
1103        frame->crop_top  >= INT_MAX - frame->crop_bottom ||
1119        frame->crop_right  = 0;
1120        frame->crop_bottom = 0;
1129        int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
1130        int min_log2_align = INT_MAX;
1132        for (int i = 0; frame->data[i]; i++) {
1134            min_log2_align = FFMIN(log2_align, min_log2_align);
1139        if (log2_crop_align < min_log2_align)
1142        if (min_log2_align < 5 && log2_crop_align != INT_MAX) {
1143            frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
1148    for (int i = 0; frame->data[i]; i++)
1153    frame->crop_left   = 0;
1154    frame->crop_right  = 0;
1155    frame->crop_top    = 0;
1156    frame->crop_bottom = 0;
int av_samples_copy(uint8_t *const *dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
#define FF_ENABLE_DEPRECATION_WARNINGS
static void free_side_data(AVFrameSideData **ptr_sd)
static int get_video_buffer(AVFrame *frame, int align)
static const AVSideDataDescriptor sd_props[]
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
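A minimal allocation sketch (illustrative, not part of this file): width, height and format must be set before the call, and align = 0 lets the library choose a suitable alignment. The helper name alloc_video_frame and the 1920x1080 YUV420P target are assumptions.

    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    /* Allocate a refcounted 1920x1080 YUV420P frame.
     * Returns 0 on success, a negative AVERROR code on failure. */
    static int alloc_video_frame(AVFrame **out)
    {
        AVFrame *frame = av_frame_alloc();
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);

        frame->format = AV_PIX_FMT_YUV420P;
        frame->width  = 1920;
        frame->height = 1080;

        /* align = 0: let libavutil pick the alignment for the current CPU */
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            av_frame_free(&frame);
            return ret;
        }

        *out = frame;
        return 0;
    }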
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
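Illustrative sketch only: a display matrix is a 9-element int32_t payload, so the side data is allocated with that size and then filled with av_display_rotation_set(). The helper name attach_rotation is hypothetical.

    #include <stdint.h>
    #include <libavutil/display.h>
    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    /* Attach a display matrix describing a rotation by 'angle' degrees. */
    static int attach_rotation(AVFrame *frame, double angle)
    {
        AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                                     9 * sizeof(int32_t));
        if (!sd)
            return AVERROR(ENOMEM);

        av_display_rotation_set((int32_t *)sd->data, angle);
        return 0;
    }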
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
uint8_t * data
The data buffer.
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
static AVFrameSideData * add_side_data_from_buf_ext(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef *buf, uint8_t *data, size_t size)
@ AV_FRAME_DATA_DOVI_METADATA
Parsed Dolby Vision metadata, suitable for passing to a software implementation.
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
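A hypothetical sketch assuming an 8-bit planar video frame: call av_frame_make_writable() before touching pixels of a frame that may share its buffers with other references; the data is copied only when actually shared.

    #include <string.h>
    #include <libavutil/frame.h>

    /* Zero the first row of the first plane of an 8-bit planar frame. */
    static int blank_first_line(AVFrame *frame)
    {
        int ret = av_frame_make_writable(frame); /* copies only if shared */
        if (ret < 0)
            return ret;

        /* frame->data[] is now exclusively owned and safe to write. */
        memset(frame->data[0], 0, frame->width);
        return 0;
    }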
@ AV_FRAME_DATA_DOVI_RPU_BUFFER
Dolby Vision RPU raw data, suitable for passing to x265 or other libraries.
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
static int get_audio_buffer(AVFrame *frame, int align)
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields.
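A small sketch with arbitrary crop values; decoders normally fill the crop_* fields themselves, so real code usually only makes the call.

    #include <libavutil/frame.h>

    /* Drop 16 pixels from the left and 8 from the top of a frame. */
    static int crop_frame(AVFrame *frame)
    {
        frame->crop_left = 16;
        frame->crop_top  = 8;
        /* Pass AV_FRAME_CROP_UNALIGNED instead of 0 to crop exactly,
         * at the cost of possibly unaligned data pointers. */
        return av_frame_apply_cropping(frame, 0);
    }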
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
@ AV_FRAME_DATA_MATRIXENCODING
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
@ AV_SIDE_DATA_PROP_SIZE_DEPENDENT
Side data depends on the video dimensions.
@ AV_FRAME_CROP_UNALIGNED
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static void get_frame_defaults(AVFrame *frame)
#define AV_FRAME_SIDE_DATA_FLAG_UNIQUE
Remove existing entries before adding new ones.
#define FF_ARRAY_ELEMS(a)
static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *target)
#define AV_FRAME_SIDE_DATA_FLAG_NEW_REF
Create a new reference to the passed in buffer instead of taking ownership of it.
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
static const int offsets[]
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
@ AV_FRAME_DATA_AUDIO_SERVICE_TYPE
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
static AVFrameSideData * replace_side_data_from_buf(AVFrameSideData *dst, AVBufferRef *buf, int flags)
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
static void wipe_side_data(AVFrameSideData ***sd, int *nb_side_data)
@ AV_SIDE_DATA_PROP_MULTI
Multiple instances of this side data type can be meaningfully present in a single side data array.
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
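Sketch only: av_frame_clone() is a convenience wrapper around av_frame_alloc() plus av_frame_ref(); the function name keep_for_later is illustrative.

    #include <libavutil/frame.h>

    static AVFrame *keep_for_later(const AVFrame *in)
    {
        AVFrame *copy = av_frame_clone(in); /* new reference to the same buffers */
        return copy;                        /* caller releases it with av_frame_free() */
    }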
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
@ AV_FRAME_DATA_DYNAMIC_HDR_VIVID
HDR Vivid dynamic metadata associated with a video frame.
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
@ AV_FRAME_DATA_SPHERICAL
The data represents the AVSphericalMapping structure defined in libavutil/spherical....
static const int sizes[][2]
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Rational number (pair of numerator and denominator).
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
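Illustrative sketch: wrap caller-provided bytes in an AVBufferRef and hand it to the frame. The frame takes ownership on success; on failure the caller still owns the buffer. The helper attach_cc and the A53 payload are assumptions.

    #include <string.h>
    #include <libavutil/buffer.h>
    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    static int attach_cc(AVFrame *frame, const uint8_t *cc, size_t cc_size)
    {
        AVBufferRef *buf = av_buffer_alloc(cc_size);
        AVFrameSideData *sd;

        if (!buf)
            return AVERROR(ENOMEM);
        memcpy(buf->data, cc, cc_size);

        sd = av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf);
        if (!sd) {
            av_buffer_unref(&buf); /* still ours on failure */
            return AVERROR(ENOMEM);
        }
        return 0;
    }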
#define AV_FRAME_SIDE_DATA_FLAG_REPLACE
Don't add a new entry if another of the same type exists.
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
AVBufferRef * av_frame_get_plane_buffer(const AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
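Sketch under the assumption of a planar video frame: the returned reference belongs to the frame and must not be unreferenced by the caller.

    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    /* Check whether the buffer backing plane 1 (e.g. the U plane of
     * YUV420P) is writable. */
    static int plane_is_writable(const AVFrame *frame)
    {
        AVBufferRef *buf = av_frame_get_plane_buffer(frame, 1);
        if (!buf)
            return AVERROR(EINVAL); /* invalid plane or no buffer */
        return av_buffer_is_writable(buf);
    }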
@ AVCOL_RANGE_UNSPECIFIED
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
@ AV_FRAME_DATA_REPLAYGAIN
ReplayGain information in the form of the AVReplayGain struct.
@ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT
Ambient viewing environment metadata, as defined by H.274.
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
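Usage sketch: dst must be blank (freshly allocated or previously unreferenced), because av_frame_ref() does not clean the destination first.

    #include <libavutil/frame.h>

    static int share_frame(AVFrame *dst, const AVFrame *src)
    {
        int ret = av_frame_ref(dst, src); /* take a second reference to src's data */
        if (ret < 0)
            return ret;

        /* ... use dst ... */

        av_frame_unref(dst); /* drop our reference, keep dst reusable */
        return 0;
    }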
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
@ AV_FRAME_DATA_LCEVC
Raw LCEVC payload data, as a uint8_t array, with NAL emulation bytes intact.
#define AV_NUM_DATA_POINTERS
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define AV_PIX_FMT_FLAG_BITSTREAM
All values of a component are bit-wise packed end to end.
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
@ AVCHROMA_LOC_UNSPECIFIED
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
void av_frame_side_data_remove_by_props(AVFrameSideData ***sd, int *nb_sd, int props)
Remove and free all side data instances that match any of the given side data properties.
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e. the same channels are present on the same positions in both.
size_t size
Size of data in bytes.
@ AV_FRAME_DATA_VIEW_ID
This side data must be associated with a video frame.
@ AV_FRAME_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
#define i(width, name, range_min, range_max)
@ AV_SIDE_DATA_PROP_CHANNEL_DEPENDENT
Side data depends on the channel layout.
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
@ AV_FRAME_DATA_STEREO3D
Stereoscopic 3d metadata.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
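Sketch of the typical hand-off pattern (slot and push_frame are illustrative names): ownership moves without touching reference counts, src is left blank as if freshly allocated, and dst must have been unreferenced beforehand.

    #include <libavutil/frame.h>

    static void push_frame(AVFrame *slot, AVFrame *src)
    {
        av_frame_unref(slot);         /* make the destination clean */
        av_frame_move_ref(slot, src); /* src is now blank */
    }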
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static void remove_side_data(AVFrameSideData ***sd, int *nb_side_data, const enum AVFrameSideDataType type)
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
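Worked example with arbitrary values: get_audio_buffer() above relies on this helper to size the data planes, and the same call can be used directly.

    #include <libavutil/samplefmt.h>

    /* How many bytes do 1024 stereo samples of packed 16-bit audio need? */
    static int packet_size(void)
    {
        int linesize;
        return av_samples_get_buffer_size(&linesize, 2, 1024,
                                          AV_SAMPLE_FMT_S16, 0);
    }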
void * av_calloc(size_t nmemb, size_t size)
@ AV_SIDE_DATA_PROP_COLOR_DEPENDENT
Side data depends on the video color space.
int av_buffer_is_writable(const AVBufferRef *buf)
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e. can describe audio data.
@ AV_FRAME_DATA_VIDEO_HINT
Provide encoder-specific hinting information about changed/unchanged portions of a frame.
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
static const struct @473 planes[]
int av_frame_replace(AVFrame *dst, const AVFrame *src)
Ensure the destination frame refers to the same data described by the source frame,...
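Sketch: unlike av_frame_ref(), the destination may already hold a reference; av_frame_replace() releases or reuses its contents as needed. The helper name is illustrative.

    #include <libavutil/frame.h>

    static int update_last_frame(AVFrame *last, const AVFrame *newer)
    {
        return av_frame_replace(last, newer); /* 0 on success, negative AVERROR on error */
    }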
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
AVFrameSideData * av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, size_t size, unsigned int flags)
Add new side data entry to an array.
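Sketch of building a frame-independent side-data array (e.g. for stream-global side data): the caller starts with sd = NULL and nb_sd = 0, and later frees everything with av_frame_side_data_free(). The type, payload size and flag below are arbitrary choices for illustration.

    #include <string.h>
    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    static int make_sd_array(AVFrameSideData ***sd, int *nb_sd)
    {
        AVFrameSideData *entry =
            av_frame_side_data_new(sd, nb_sd, AV_FRAME_DATA_SEI_UNREGISTERED,
                                   16, AV_FRAME_SIDE_DATA_FLAG_UNIQUE);
        if (!entry)
            return AVERROR(ENOMEM);

        memset(entry->data, 0, entry->size); /* fill the 16-byte payload */
        return 0;
    }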
@ AV_FRAME_DATA_VIDEO_ENC_PARAMS
Encoding parameters for a video frame, as described by AVVideoEncParams.
static void av_image_copy2(uint8_t *const dst_data[4], const int dst_linesizes[4], uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Wrapper around av_image_copy() to workaround the limitation that the conversion from uint8_t * const ...
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
enum AVFrameSideDataType type
static int ref[MAX_W *MAX_W]
This struct describes the properties of a side data type.
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
#define FF_DISABLE_DEPRECATION_WARNINGS
A reference to a data buffer.
Structure to hold side data for an AVFrame.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame, const AVPixFmtDescriptor *desc)
static AVFrameSideData * add_side_data_from_buf(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef *buf)
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
const char * av_frame_side_data_name(enum AVFrameSideDataType type)
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
@ AV_FRAME_DATA_REGIONS_OF_INTEREST
Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of array element is ...
#define flags(name, subs,...)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
const AVFrameSideData * av_frame_side_data_get_c(const AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Get a side data entry of a specific type from an array.
@ AV_FRAME_DATA_MOTION_VECTORS
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
static void frame_side_data_wipe(AVFrame *frame)
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
AVFrameSideData * av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef **pbuf, unsigned int flags)
Add a new side data entry to an array from an existing AVBufferRef.
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.