80 #define OFFSET(x) offsetof(LUT2Context, x)
81 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
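OFFSET() and FLAGS are the usual helpers for the filter's AVOption table (the options[] array referenced below): OFFSET() locates a field inside LUT2Context and FLAGS marks each option as a video/filtering parameter. A minimal sketch of how such an entry typically looks follows; the option names, defaults and ranges here are illustrative assumptions, not copied from the file.

/* Illustrative sketch only -- option names, defaults and ranges are
 * assumptions; it shows how OFFSET()/FLAGS are typically used. */
static const AVOption example_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]),
      AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "d",  "set output bit depth",        OFFSET(odepth),
      AV_OPT_TYPE_INT,    { .i64 = 0 }, 0, 16, FLAGS },
    { NULL }
};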
100 for (i = 0; i < 4; i++) {
109 AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, \
110 AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, \
111 AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P, \
112 AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
113 AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, \
114 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP,
117 AV_PIX_FMT_GBRP9, AV_PIX_FMT_GRAY9, \
118 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, \
119 AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
122 AV_PIX_FMT_GRAY10, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, \
123 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, \
124 AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
127 AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, \
128 AV_PIX_FMT_GRAY12, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRP12,
131 AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, \
132 AV_PIX_FMT_GRAY12, AV_PIX_FMT_GBRP14,
135 AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, \
136 AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16, \
137 AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP16, AV_PIX_FMT_GRAY16,
176 if (s->tlut2 || !s->odepth)
184 case 8:  pix_fmts = bit8_pix_fmts;  break;
185 case 9:  pix_fmts = bit9_pix_fmts;  break;
186 case 10: pix_fmts = bit10_pix_fmts; break;
187 case 12: pix_fmts = bit12_pix_fmts; break;
188 case 14: pix_fmts = bit14_pix_fmts; break;
189 case 16: pix_fmts = bit16_pix_fmts; break;
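This switch maps the requested output bit depth to one of the static pixel-format lists defined above; the earlier check at line 176 skips the restriction entirely for tlut2 or when no output depth was forced. Below is a standalone sketch of the same selection pattern, under the assumption that the lists are sentinel-terminated (AV_PIX_FMT_NONE in libavfilter); the format ids are dummies.

/* Standalone sketch (simplified assumption, not the filter's code). */
#include <stdio.h>

enum { FMT_NONE = -1 };                                  /* stand-in sentinel */

static const int bit8_list[]  = { 1, 2, 3, FMT_NONE };   /* dummy 8-bit ids   */
static const int bit10_list[] = { 4, 5, 6, FMT_NONE };   /* dummy 10-bit ids  */

static const int *pick_list(int odepth)
{
    switch (odepth) {
    case 8:  return bit8_list;
    case 10: return bit10_list;
    default: return NULL;             /* unsupported depth: caller errors out */
    }
}

int main(void)
{
    for (const int *l = pick_list(10); l && *l != FMT_NONE; l++)
        printf("offer format id %d\n", *l);
    return 0;
}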
202 int hsub = desc->log2_chroma_w;
203 int vsub = desc->log2_chroma_h;
207 s->heightx[0] = s->heightx[3] = inlink->h;
209 s->widthx[0] = s->widthx[3] = inlink->w;
213 s->depthx = desc->comp[0].depth;
217 s->depthy = desc->comp[0].depth;
229 int hsub = desc->log2_chroma_w;
230 int vsub = desc->log2_chroma_h;
233 s->depthy = desc->comp[0].depth;
236 s->heighty[0] = s->heighty[3] = inlink->h;
238 s->widthy[0] = s->widthy[3] = inlink->w;
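Both input-configuration callbacks derive per-plane geometry the same way: planes 1 and 2 are the luma size right-shifted by the chroma subsampling factors with ceiling rounding (AV_CEIL_RSHIFT), while planes 0 and 3 keep the full width and height. A small self-contained example, assuming yuv420p-style 2x2 subsampling:

/* Standalone sketch; the macro mirrors how AV_CEIL_RSHIFT rounds up. */
#include <stdio.h>

#define CEIL_RSHIFT(a, b) (-((-(a)) >> (b)))

int main(void)
{
    int w = 1920, h = 1080;      /* luma size */
    int hsub = 1, vsub = 1;      /* yuv420p: log2_chroma_w = log2_chroma_h = 1 */

    int chroma_w = CEIL_RSHIFT(w, hsub);   /* 960 */
    int chroma_h = CEIL_RSHIFT(h, vsub);   /* 540 */

    printf("planes 0/3: %dx%d, planes 1/2: %dx%d\n", w, h, chroma_w, chroma_h);
    return 0;
}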
243 #define DEFINE_LUT2(zname, xname, yname, ztype, xtype, ytype, zdiv, xdiv, ydiv) \
244 static int lut2_##zname##_##xname##_##yname(AVFilterContext *ctx, \
246 int jobnr, int nb_jobs) \
248 LUT2Context *s = ctx->priv; \
249 ThreadData *td = arg; \
250 AVFrame *out = td->out; \
251 AVFrame *srcx = td->srcx; \
252 AVFrame *srcy = td->srcy; \
253 const int odepth = s->odepth; \
256 for (p = 0; p < s->nb_planes; p++) { \
257 const int slice_start = (s->heightx[p] * jobnr) / nb_jobs; \
258 const int slice_end = (s->heightx[p] * (jobnr+1)) / nb_jobs; \
259 const uint16_t *lut = s->lut[p]; \
260 const xtype *srcxx; \
261 const ytype *srcyy; \
264 dst = (ztype *)(out->data[p] + slice_start * out->linesize[p]); \
265 srcxx = (const xtype *)(srcx->data[p] + slice_start * srcx->linesize[p]);\
266 srcyy = (const ytype *)(srcy->data[p] + slice_start * srcy->linesize[p]);\
268 for (y = slice_start; y < slice_end; y++) { \
269 for (x = 0; x < s->widthx[p]; x++) { \
270 dst[x] = av_clip_uintp2_c(lut[(srcyy[x] << s->depthx) | srcxx[x]], odepth); \
273 dst += out->linesize[p] / zdiv; \
274 srcxx += srcx->linesize[p] / xdiv; \
275 srcyy += srcy->linesize[p] / ydiv; \
288 DEFINE_LUT2(16, 16, 16, uint16_t, uint16_t, uint16_t, 2, 2, 2)
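Each generated kernel slices the plane rows across jobs, combines the two source samples into a single table index, (srcy << depthx) | srcx, clips the looked-up value to the output depth, and advances by linesize divided by the sample size to step one row. A self-contained sketch of the same apply loop for two 8-bit sources and an 8-bit destination, assuming the table was built elsewhere:

/* Standalone sketch of the apply loop above; 'lut' must hold
 * 1 << (8 + 8) = 65536 entries. Strides are in elements here. */
#include <stdint.h>

static void apply_lut2_8(uint8_t *dst, int dst_stride,
                         const uint8_t *srcx, int srcx_stride,
                         const uint8_t *srcy, int srcy_stride,
                         const uint16_t *lut, int w, int h)
{
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++)
            dst[x] = (uint8_t)lut[((unsigned)srcy[x] << 8) | srcx[x]];
        dst  += dst_stride;
        srcx += srcx_stride;
        srcy += srcy_stride;
    }
}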
302 if (ctx->is_disabled || !srcy) {
331 s->depth = s->depthx + s->depthy;
332 s->nb_planes = s->nb_planesx;
334 s->lut2 = s->depth > 16 ? lut2_16_16_16 : lut2_8_8_8;
336 if (s->depthx == 8 && s->depthy == 8 && s->odepth > 8)
337 s->lut2 = lut2_16_8_8;
338 if (s->depthx > 8 && s->depthy == 8 && s->odepth > 8)
339 s->lut2 = lut2_16_16_8;
340 if (s->depthx == 8 && s->depthy > 8 && s->odepth > 8)
341 s->lut2 = lut2_16_8_16;
342 if (s->depthx == 8 && s->depthy == 8 && s->odepth == 8)
343 s->lut2 = lut2_8_8_8;
344 if (s->depthx > 8 && s->depthy == 8 && s->odepth == 8)
345 s->lut2 = lut2_8_16_8;
346 if (s->depthx == 8 && s->depthy > 8 && s->odepth == 8)
347 s->lut2 = lut2_8_8_16;
348 if (s->depthx > 8 && s->depthy > 8 && s->odepth == 8)
349 s->lut2 = lut2_8_16_16;
351 s->odepth = s->depthx;
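The combined depth s->depth = s->depthx + s->depthy determines how large each per-plane table has to be: the index (y << depthx) | x can take 1 << depth distinct values, and each entry appears to be a uint16_t (see the lut[4] member in the reference list below). A quick worked example of the resulting size, assuming two 10-bit inputs:

/* Worked example (assumption: one uint16_t per LUT entry). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int depthx = 10, depthy = 10;                      /* two 10-bit inputs */
    size_t entries = (size_t)1 << (depthx + depthy);   /* 1048576 entries   */
    printf("%zu entries, %zu bytes per plane\n",
           entries, entries * sizeof(uint16_t));       /* 2097152 bytes, ~2 MiB */
    return 0;
}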
354 for (p = 0; p < s->nb_planes; p++) {
360 for (p = 0; p < s->nb_planes; p++) {
366 s->comp_expr[p] = NULL;
371 "Error when parsing the expression '%s' for the component %d.\n",
372 s->comp_expr_str[p], p);
377 for (y = 0; y < (1 << s->depthy); y++) {
379 for (x = 0; x < (1 << s->depthx); x++) {
384 "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
385 s->comp_expr_str[p], x, y, p);
389 s->lut[p][(y << s->depthx) + x] = res;
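The table is filled once at configuration time by evaluating the per-component expression for every (x, y) input pair, so the per-pixel cost at run time is a single lookup. A standalone sketch of the same build loop, with a hard-coded rule (a rounded average) standing in for the parsed expression:

/* Standalone sketch of the build loop above for 8-bit inputs. */
#include <stdint.h>
#include <stdlib.h>

static uint16_t *build_avg_lut8(void)
{
    int depthx = 8, depthy = 8;
    uint16_t *lut = malloc(((size_t)1 << (depthx + depthy)) * sizeof(*lut));
    if (!lut)
        return NULL;
    for (int y = 0; y < (1 << depthy); y++)
        for (int x = 0; x < (1 << depthx); x++)
            lut[(y << depthx) + x] = (uint16_t)((x + y + 1) / 2);
    return lut;
}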
405 int hsub = desc->log2_chroma_w;
406 int vsub = desc->log2_chroma_h;
409 outlink->w = srcx->w;
410 outlink->h = srcx->h;
417 s->height[0] = s->height[3] = outlink->h;
419 s->width[0] = s->width[3] = outlink->w;
426 if (srcx->w != srcy->w || srcx->h != srcy->h) {
428 "(size %dx%d) do not match the corresponding "
429 "second input link %s parameters (size %dx%d)\n",
430 ctx->input_pads[0].name, srcx->w, srcx->h,
431 ctx->input_pads[1].name,
436 if (s->nb_planesx != s->nb_planesy) {
438 "(%d) do not match the corresponding "
439 "second input link %s number of planes (%d)\n",
440 ctx->input_pads[0].name, s->nb_planesx,
441 ctx->input_pads[1].name, s->nb_planesy);
445 if (s->nb_planesx != s->nb_planes) {
447 "(%d) do not match the corresponding "
448 "output link %s number of planes (%d)\n",
449 ctx->input_pads[0].name, s->nb_planesx,
450 ctx->output_pads[0].name, s->nb_planes);
454 if (s->widthx[1] != s->widthy[1] || s->heightx[1] != s->heighty[1]) {
456 "(size %dx%d) do not match the corresponding "
457 "second input link %s 2nd plane (size %dx%d)\n",
458 ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
459 ctx->input_pads[1].name,
460 s->widthy[1], s->heighty[1]);
464 if (s->widthx[2] != s->widthy[2] || s->heightx[2] != s->heighty[2]) {
466 "(size %dx%d) do not match the corresponding "
467 "second input link %s 3rd plane (size %dx%d)\n",
468 ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
469 ctx->input_pads[1].name,
470 s->widthy[2], s->heighty[2]);
474 if (s->widthx[1] != s->width[1] || s->heightx[1] != s->height[1]) {
476 "(size %dx%d) do not match the corresponding "
477 "output link %s 2nd plane (size %dx%d)\n",
478 ctx->input_pads[0].name, s->widthx[1], s->heightx[1],
479 ctx->output_pads[0].name, s->width[1], s->height[1]);
483 if (s->widthx[2] != s->width[2] || s->heightx[2] != s->height[2]) {
485 "(size %dx%d) do not match the corresponding "
486 "output link %s 3rd plane (size %dx%d)\n",
487 ctx->input_pads[0].name, s->widthx[2], s->heightx[2],
488 ctx->output_pads[0].name, s->width[2], s->height[2]);
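All of the checks above follow one pattern: compare a plane count or a plane's width/height between the first input, the second input and the output, log which pads disagree, and fail. An illustrative helper showing that pattern in isolation (this helper is an assumption for clarity; the file keeps the checks inline as shown):

/* Illustrative helper, not present in the file: compare one plane's
 * geometry between two links and report a mismatch. Returns 0 on match. */
#include <stdio.h>

static int check_plane_geometry(const char *name_a, int wa, int ha,
                                const char *name_b, int wb, int hb)
{
    if (wa != wb || ha != hb) {
        fprintf(stderr,
                "%s plane (size %dx%d) does not match %s plane (size %dx%d)\n",
                name_a, wa, ha, name_b, wb, hb);
        return -1;
    }
    return 0;
}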
545 #define lut2_options options
552 .preinit = lut2_framesync_preinit,
554 .priv_class = &lut2_class,
564 #if CONFIG_TLUT2_FILTER
570 s->tlut2 = !strcmp(ctx->filter->name, "tlut2");
584 if (ctx->is_disabled) {
600 td.srcy = s->prev_frame;
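In the tlut2 variant there is only one input: the second source handed to the kernel is simply the previously seen frame kept in s->prev_frame, so every output combines a frame with its predecessor (the very first frame only primes prev_frame). A simplified, self-contained sketch of that pairing, using stand-in types rather than AVFrame:

/* Simplified sketch of the tlut2 frame pairing; types are stand-ins. */
#include <stdio.h>

typedef struct { int id; } Frame;          /* stand-in for AVFrame        */

static Frame *prev;                        /* stand-in for s->prev_frame  */

static void combine(const Frame *x, const Frame *y)
{
    printf("combine frame %d (srcx) with frame %d (srcy)\n", x->id, y->id);
}

static void tlut2_push(Frame *cur)
{
    if (prev)
        combine(cur, prev);
    prev = cur;                            /* next frame pairs with this one */
}

int main(void)
{
    static Frame f[3] = { {0}, {1}, {2} };
    for (int i = 0; i < 3; i++)
        tlut2_push(&f[i]);                 /* pairs: (1,0), (2,1) */
    return 0;
}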
611 static const AVOption tlut2_options[] = {
625 .filter_frame = tlut2_filter_frame,
642 .description = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
644 .priv_class = &tlut2_class,
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
AVPixelFormat
Pixel format.
static int config_output(AVFilterLink *outlink)
static av_cold int init(AVCodecContext *avctx)
int(* lut2)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe, unsigned get)
Get the current frame in an input.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
#define DEFINE_LUT2(zname, xname, yname, ztype, xtype, ytype, zdiv, xdiv, ydiv)
const char * name
Filter name.
@ EXT_INFINITY
Extend the frame to infinity.
static int config_inputx(AVFilterLink *inlink)
AVFormatInternal * internal
An opaque field for libavformat internal usage.
A link between two filters.
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
@ EXT_STOP
Completely stop all streams with this one.
static int process_frame(FFFrameSync *fs)
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
A filter pad used for either input or output.
double var_values[VAR_VARS_NB]
static const char *const var_names[]
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int activate(AVFilterContext *ctx)
#define AV_CEIL_RSHIFT(a, b)
AVRational sample_aspect_ratio
agreed upon sample aspect ratio
static av_cold void uninit(AVFilterContext *ctx)
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0,...
static enum AVPixelFormat pix_fmts[]
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
#define fs(width, name, subs,...)
static int lut2_config_output(AVFilterLink *outlink)
uint16_t * lut[4]
lookup table for each component
static const AVFilterPad inputs[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int format
agreed upon media format
AVFilterContext * src
source filter
static enum AVPixelFormat all_pix_fmts[]
static const AVOption options[]
#define AVFILTER_DEFINE_CLASS(fname)
#define i(width, name, range_min, range_max)
int w
agreed upon image width
#define av_malloc_array(a, b)
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Used for passing data between threads.
static int config_inputy(AVFilterLink *inlink)
const char * name
Pad name.
static const AVFilterPad outputs[]
int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
Initialize a frame sync structure.
int h
agreed upon image height
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link.
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
#define flags(name, subs,...)
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
static int query_formats(AVFilterContext *ctx)
FRAMESYNC_DEFINE_CLASS(lut2, LUT2Context, fs)