static const uint8_t NNEDI_XDIM[] = { 8, 16, 32, 48, 8, 16, 32 };

static const uint8_t NNEDI_YDIM[] = { 6, 6, 6, 6, 4, 4, 4 };

static const uint16_t NNEDI_NNS[] = { 16, 32, 64, 128, 256 };
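/* nsize indexes NNEDI_XDIM/NNEDI_YDIM in parallel (e.g. the default
 * nsize=6 selects a 32x4 pixel neighborhood), and nnsparam indexes
 * NNEDI_NNS, the neuron count of the predictor network; each model's
 * filter size is xdim * ydim. */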
 
    void (*read)(const uint8_t *src, float *dst,
                 int src_stride, int dst_stride,
                 int width, int height, float scale);
    void (*write)(const float *src, uint8_t *dst,
                  int src_stride, int dst_stride,
                  int width, int height, int depth, float scale);
    void (*prescreen[2])(AVFilterContext *ctx,
                         const void *src, ptrdiff_t src_stride,
                         uint8_t *prescreen, int N,
                         const PrescreenerCoefficients *const coeffs);
 
#define OFFSET(x) offsetof(NNEDIContext, x)
#define RFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
    { "interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, RFLAGS, "deint" },

    { "af", "use frame flags, both fields",  0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, RFLAGS, "field" },

    { "a",  "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, RFLAGS, "field" },

    { "tf", "use both fields, top first",    0, AV_OPT_TYPE_CONST, {.i64=2},  0, 0, RFLAGS, "field" },

    { "bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3},  0, 0, RFLAGS, "field" },

    { "nsize", "set size of local neighborhood around each pixel, used by the predictor neural network", OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, RFLAGS, "nsize" },
 
    outlink->w             = ctx->inputs[0]->w;
    outlink->h             = ctx->inputs[0]->h;

    if (s->field == -2 || s->field > 1)
 
static float dot_dsp(const NNEDIContext *const s, const float *kernel,
                     const float *input, int n, float scale, float bias)

    sum = s->fdsp->scalarproduct_float(kernel, input, n);

    y = sum * scale + bias + 1e-20f;
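/* The 1e-20f term keeps the result strictly nonzero, presumably so the
 * downstream elliott()/softmax normalization never divides by zero. */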
 
    return x / (1.0f + fabsf(x));
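/* elliott() is the "softsign" activation x / (1 + |x|): bounded to
 * (-1, 1) like tanh, but far cheaper to evaluate. */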
 
static void process_old(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)

    const float *src_p = src;

    const float *window = src_p - 2 * src_stride - 5;

    for (int j = 0; j < N; j++) {

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 12, window + i * src_stride + j, 12 * sizeof(float));

        for (int n = 0; n < 4; n++)

        for (int n = 0; n < 4; n++)

        for (int n = 0; n < 4; n++)
 
static void process_new(AVFilterContext *ctx,
                        const void *src, ptrdiff_t src_stride,
                        uint8_t *prescreen, int N,
                        const PrescreenerCoefficients *const m_data)

    const float *src_p = src;

    const float *window = src_p - 2 * src_stride - 6;

    for (int j = 0; j < N; j += 4) {

        for (int i = 0; i < 4; i++)
            memcpy(input + i * 16, window + i * src_stride + j, 16 * sizeof(float));

        for (int n = 0; n < 4; n++)

        for (int n = 0; n < 4; n++)

        for (int n = 0; n < 4; n++)
            prescreen[j + n] = state[n + 4] > 0.f;
 
    return nn * model->nsize;

static void gather_input(const float *src, ptrdiff_t src_stride,
                         float *buf, float mstd[4],
                         const PredictorCoefficients *const model)
 
    for (int i = 0; i < model->ydim; i++) {

        memcpy(buf, src, model->xdim * sizeof(float));

        for (int j = 0; j < model->xdim; j++) {

    mstd[0] = sum * scale;

    tmp = sum_sq * scale - mstd[0] * mstd[0];

    if (tmp < FLT_EPSILON) {

        mstd[2] = 1.0f / mstd[1];
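/* mstd[] holds the window statistics: mstd[0] = mean, mstd[1] = standard
 * deviation, mstd[2] = 1/stddev (forced to 0 when the variance falls
 * below FLT_EPSILON), and mstd[3] accumulates the denormalized output. */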
 
static void wae5(const float *softmax, const float *el,
                 int n, float mstd[4])

    float vsum = 0.0f, wsum = 0.0f;

    for (int i = 0; i < n; i++) {

        mstd[3] += (5.0f * vsum) / wsum * mstd[1] + mstd[0];
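/* wae5() forms a weighted average of the elliott outputs using the
 * softmax values as weights, then rescales by the local deviation
 * (mstd[1]) and re-centers on the local mean (mstd[0]). */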
 
static void predictor(AVFilterContext *ctx,
                      const void *src, ptrdiff_t src_stride, void *dst,
                      const uint8_t *prescreen, int N,
                      const PredictorCoefficients *const model, int use_q2)

    const float *src_p = src;

    const float *window = src_p - (model->ydim / 2) * src_stride - (model->xdim / 2 - 1);

    const int filter_size = model->nsize;

    const int nns = model->nns;

    for (int i = 0; i < N; i++) {
 
        float activation[256 * 2];

        for (int nn = 0; nn < nns; nn++)

        for (int nn = 0; nn < nns; nn++)

        wae5(activation, activation + nns, nns, mstd);

            for (int nn = 0; nn < nns; nn++)

            for (int nn = 0; nn < nns; nn++)

            wae5(activation, activation + nns, nns, mstd);

        dst_p[i] = mstd[3] * (use_q2 ? 0.5f : 1.f);
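/* With qual=2 both coefficient sets contribute a wae5() pass into
 * mstd[3], hence the 0.5f factor that averages the two estimates. */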
 
static void read_bytes(const uint8_t *src, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)

    for (int y = 0; y < height; y++) {

        for (int x = 0; x < 32; x++)
            dst[-x - 1] = src[x];

        for (int x = 0; x < width; x++)

        for (int x = 0; x < 32; x++)
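/* Each row gets 32 mirrored samples on both sides (dst[-x - 1] = src[x]
 * on the left, and the matching reflection on the right), so the
 * prescreener and predictor windows never read out of bounds. */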
 
static void read_words(const uint8_t *srcp, float *dst,
                       int src_stride, int dst_stride,
                       int width, int height, float scale)

    const uint16_t *src = (const uint16_t *)srcp;

    for (int y = 0; y < height; y++) {

        for (int x = 0; x < 32; x++)

        for (int x = 0; x < width; x++)

        for (int x = 0; x < 32; x++)
 
static void write_bytes(const float *src, uint8_t *dst,
                        int src_stride, int dst_stride,
                        int width, int height, int depth, float scale)

    for (int y = 0; y < height; y++) {

        for (int x = 0; x < width; x++)

static void write_words(const float *src, uint8_t *dstp,
                        int src_stride, int dst_stride,
                        int width, int height, int depth, float scale)

    uint16_t *dst = (uint16_t *)dstp;

    for (int y = 0; y < height; y++) {

        for (int x = 0; x < width; x++)
 
static void interpolation(const void *src, ptrdiff_t src_stride,
                          void *dst, const uint8_t *prescreen, int n)

    const float *src_p = src;

    const float *window = src_p - 2 * src_stride;

    for (int i = 0; i < n; i++) {

        accum += (-3.0f / 32.0f) * window[0 * src_stride + i];
        accum += (19.0f / 32.0f) * window[1 * src_stride + i];
        accum += (19.0f / 32.0f) * window[2 * src_stride + i];
        accum += (-3.0f / 32.0f) * window[3 * src_stride + i];
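/* (-3, 19, 19, -3)/32 is the standard 4-tap cubic vertical interpolator,
 * applied to the pixels the prescreener decided do not need the full
 * predictor network. */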
 
    const float in_scale = s->in_scale;
    const float out_scale = s->out_scale;
    const int depth = s->depth;

    for (int p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const int slice_start = 2 * ((height / 2 * jobnr) / nb_jobs);

        const uint8_t *src_data = in->data[p];
        uint8_t *dst_data = out->data[p];
        uint8_t *dst = out->data[p] + slice_start * out->linesize[p];
        const int src_linesize = in->linesize[p];
        const int dst_linesize = out->linesize[p];
        uint8_t *prescreen_buf = s->prescreen_buf[jobnr];
        float *srcbuf = s->input_buf[jobnr];
        const int srcbuf_stride = width + 64;
        float *dstbuf = s->output_buf[jobnr];
        const int dstbuf_stride = width;
        const int slice_height = (slice_end - slice_start) / 2;

        const uint8_t *in_line;

        if (!(s->process_plane & (1 << p))) {

        y_out    = slice_start + (tff ^ (slice_start & 1));
        in_line  = src_data + (y_out * src_linesize);
        out_line = dst_data + (y_out * dst_linesize);

            memcpy(out_line, in_line, s->linesize[p]);

            in_line  += src_linesize * 2;
            out_line += dst_linesize * 2;
 
        y_out = slice_start + ((!tff) ^ (slice_start & 1));
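/* Parity selection: the rows copied verbatim above belong to the field
 * that already exists; y_out now starts on the opposite parity, the rows
 * of the missing field that the network will synthesize. */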
 
        s->read(src_data + FFMAX(y_out - 5, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,

        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 3, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,

        srcbuf += srcbuf_stride;

        s->read(src_data + FFMAX(y_out - 1, tff) * src_linesize,
                srcbuf + 32,
                src_linesize * 2, srcbuf_stride,

        srcbuf += srcbuf_stride;

        in_line  = src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize;
        out_line = dst_data + (y_out * dst_linesize);

        s->read(in_line, srcbuf + 32, src_linesize * 2, srcbuf_stride,
                width, slice_height - last_slice, in_scale);

        y_out += (slice_height - last_slice) * 2;

        s->read(src_data + FFMIN(y_out + 1, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height - last_slice),
                src_linesize * 2, srcbuf_stride,

        s->read(src_data + FFMIN(y_out + 3, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 1 - last_slice),
                src_linesize * 2, srcbuf_stride,

        s->read(src_data + FFMIN(y_out + 5, height - 1 - !tff) * src_linesize,
                srcbuf + 32 + srcbuf_stride * (slice_height + 2 - last_slice),
                src_linesize * 2, srcbuf_stride,
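/* The three reads above the main block and the three after it fill a
 * 3-row margin on each side of the slice; FFMAX/FFMIN clamp the source
 * row so the margins repeat edge rows instead of reading out of bounds. */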
 
        for (int y = 0; y < slice_end - slice_start; y += 2) {

            s->prescreen[s->pscrn > 1](ctx, srcbuf + (y / 2) * srcbuf_stride + 32,
                                       srcbuf_stride, prescreen_buf, width,
                                       &s->prescreener[s->pscrn - 1]);
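/* pscrn picks the prescreener: 1 selects the original nnedi3 network
 * (process_old with prescreener[0]), 2-4 select the newer networks
 * (process_new with prescreener[1..3]). */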
 
            predictor(ctx,
                      srcbuf + (y / 2) * srcbuf_stride + 32,
                      srcbuf_stride,
                      dstbuf + (y / 2) * dstbuf_stride,
                      prescreen_buf, width,
                      &s->coeffs[s->etype][s->nnsparam][s->nsize], s->qual == 2);

            interpolation(srcbuf + (y / 2) * srcbuf_stride + 32, srcbuf_stride,
                          dstbuf + (y / 2) * dstbuf_stride,
                          prescreen_buf, width);

        s->write(dstbuf, out_line, dstbuf_stride, dst_linesize * 2,
                 width, slice_height, depth, out_scale);
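/* Results go back through s->write() with a doubled destination stride,
 * filling only the synthesized field; the kept field was already copied
 * verbatim above. */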
 
                      FFMIN(s->planeheight[1] / 2, s->nb_threads));

    if (s->field == -2 || s->field > 1)
        s->field_n = !s->field_n;

    if ((s->deint && !s->prev->interlaced_frame) || ctx->is_disabled) {

    s->pts = s->prev->pts * 2;

    if (ret < 0 || (s->field > -2 && s->field < 2)) {

    s->pts = s->prev->pts + in->pts;
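/* In both-fields mode the output time base is halved (see
 * config_output), so prev->pts * 2 reproduces the first field's original
 * time and prev->pts + in->pts lands the second field halfway between
 * the two source frames. */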
 
                                                ctx->outputs[0]->time_base);

    } else if (ret < 0) {

    memcpy(dst, *data, n * sizeof(float));
 
    int filter_size = nns * xdim * ydim;

    data = av_calloc(filter_size + bias_size, 4 * sizeof(float));

    coeffs->nsize = xdim * ydim;

    copy_weights(&s->prescreener[0].kernel_l0[0][0], 4 * 48, &bdata);

    copy_weights(&s->prescreener[0].kernel_l1[0][0], 4 * 4, &bdata);

    copy_weights(&s->prescreener[0].kernel_l2[0][0], 4 * 8, &bdata);
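/* The original prescreener is a tiny three-layer net: 48 inputs -> 4
 * neurons (kernel_l0), 4 -> 4 (kernel_l1), then 8 -> 4 (kernel_l2,
 * which sees the outputs of both earlier layers, hence 8 inputs per
 * neuron). */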
 
    for (int i = 0; i < 3; i++) {

        float kernel_l0_shuffled[4 * 64];
        float kernel_l1_shuffled[4 * 4];

        for (int n = 0; n < 4; n++) {
            for (int k = 0; k < 64; k++)
                data->kernel_l0[n][k] = kernel_l0_shuffled[(k / 8) * 32 + n * 8 + k % 8];
            for (int k = 0; k < 4; k++)
                data->kernel_l1[n][k] = kernel_l1_shuffled[k * 4 + n];
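/* The weights file appears to store these kernels interleaved across the
 * four neurons; the de-shuffle above regroups them so each kernel_l0[n]
 * and kernel_l1[n] row is contiguous for scalarproduct_float(). */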
 
    for (int m = 0; m < 2; m++) {

        for (int i = 0; i < 5; i++) {

            for (int j = 0; j < 7; j++) {

                const int filter_size = xdim * ydim;
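/* 2 etypes x 5 nns presets x 7 nsize presets = 70 predictor models,
 * matching the coeffs[2][5][7] array in the filter context. */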
 
    for (int n = 0; n < 4; n++) {

    for (int n = 0; n < 4; n++) {

    const int filter_size = model->nsize;
    const int nns = model->nns;
    const float scale = 1.f / nns;

    double softmax_means[256];
    double elliott_means[256];
    double mean_filter[48 * 6] = { 0 };
 
    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q1 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q1 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++)
            mean_filter[k] += model->softmax_q1[nn * filter_size + k] - softmax_means[nn];

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q1[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q1[nn * filter_size + k] -= elliott_means[nn];

    memset(mean_filter, 0, sizeof(mean_filter));

    for (int nn = 0; nn < nns; nn++) {
        softmax_means[nn] = mean(model->softmax_q2 + nn * filter_size, filter_size);
        elliott_means[nn] = mean(model->elliott_q2 + nn * filter_size, filter_size);

        for (int k = 0; k < filter_size; k++) {
            mean_filter[k] += model->softmax_q2[nn * filter_size + k] - softmax_means[nn];

    for (int k = 0; k < filter_size; k++)
        mean_filter[k] *= scale;

    for (int nn = 0; nn < nns; nn++) {
        for (int k = 0; k < filter_size; k++) {
            model->softmax_q2[nn * filter_size + k] -= softmax_means[nn] + mean_filter[k];
            model->elliott_q2[nn * filter_size + k] -= elliott_means[nn];
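/* Weight conditioning: each softmax neuron loses both its own mean and
 * the mean filter shared across neurons, while each elliott neuron loses
 * only its own mean; the same treatment is applied to the q1 and q2
 * coefficient sets. */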
 
    FILE *weights_file = NULL;
    int64_t weights_size;

    if (fseek(weights_file, 0, SEEK_END)) {

        fclose(weights_file);

    weights_size = ftell(weights_file);

    if (weights_size == -1) {
        fclose(weights_file);

        fclose(weights_file);

    if (fseek(weights_file, 0, SEEK_SET)) {
        fclose(weights_file);

        fclose(weights_file);

        fclose(weights_file);

    fclose(weights_file);
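/* Standard fseek/ftell size probe: seek to the end, take ftell() as the
 * file size, rewind, then read the weights; every error path closes
 * weights_file before returning. */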
 
    s->depth = desc->comp[0].depth;

    s->planewidth[0] = s->planewidth[3] = inlink->w;

    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->half = ((1 << 8) - 1) / 2.f;
    s->out_scale = 1 << (s->depth - 8);
    s->in_scale = 1.f / s->out_scale;
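/* Samples are normalized to the 8-bit range for the network regardless
 * of bit depth: in_scale maps input values down to 0..255, out_scale
 * expands results back, and half = 127.5 is the mid-gray used when
 * conditioning the prescreener weights. */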
 
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++)

    s->input_size = (s->planewidth[0] + 64) * (s->planeheight[0] + 6);
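/* Per-thread float workspace: 64 extra columns (the 32 mirrored pixels
 * per side written by read_bytes/read_words) and 6 extra rows (the
 * 3-line margins above and below each slice). */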
 
    s->input_buf = av_calloc(s->nb_threads, sizeof(*s->input_buf));

    for (int i = 0; i < s->nb_threads; i++) {
        s->input_buf[i] = av_calloc(s->input_size, sizeof(**s->input_buf));
        if (!s->input_buf[i])

    s->output_buf = av_calloc(s->nb_threads, sizeof(*s->output_buf));

    for (int i = 0; i < s->nb_threads; i++) {
        s->output_buf[i] = av_calloc(s->input_size, sizeof(**s->output_buf));
        if (!s->output_buf[i])

    s->prescreen_buf = av_calloc(s->nb_threads, sizeof(*s->prescreen_buf));
    if (!s->prescreen_buf)

    for (int i = 0; i < s->nb_threads; i++) {
        s->prescreen_buf[i] = av_calloc(s->planewidth[0], sizeof(**s->prescreen_buf));
        if (!s->prescreen_buf[i])

    for (int i = 0; i < s->nb_threads && s->prescreen_buf; i++)

    for (int i = 0; i < s->nb_threads && s->input_buf; i++)

    for (int i = 0; i < s->nb_threads && s->output_buf; i++)

    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 5; j++) {
            for (int k = 0; k < 7; k++) {
 
    .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),

    .priv_class    = &nnedi_class,
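/* A minimal usage sketch (field/deint values come from the option table
 * above; "weights", the path to the separately shipped
 * nnedi3_weights.bin file, is assumed from the filter's documented
 * options):
 *
 *   ffmpeg -i interlaced.mkv \
 *          -vf nnedi=weights=nnedi3_weights.bin:field=af:deint=interlaced \
 *          progressive.mkv
 */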
 
  