   99     void (*wae5)(const float *, const int, float *);
 
  104 #define OFFSET(x) offsetof(NNEDIContext, x) 
  105 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM 
  111         { "interlaced", "only deinterlace frames marked as interlaced", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "deint" },

  113         { "af", "use frame flags, both fields",  0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, FLAGS, "field" },

  114         { "a",  "use frame flags, single field", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "field" },

  117         { "tf", "use both fields, top first",    0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "field" },

  118         { "bf", "use both fields, bottom first", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "field" },

  120     { "nsize",  "set size of local neighborhood around each pixel, used by the predictor neural network", OFFSET(nsize), AV_OPT_TYPE_INT, {.i64=6}, 0, 6, FLAGS, "nsize" },

  138         { "a",  "weights trained to minimize absolute error", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "etype" },

  139         { "s",  "weights trained to minimize squared error",  0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "etype" },
 
  162     s->planeheight[0] = s->planeheight[3] = inlink->h;

  174     outlink->w             = ctx->inputs[0]->w;

  175     outlink->h             = ctx->inputs[0]->h;

  177     if (s->field > 1 || s->field == -2)
 
  206     const int off = 1 - fn;

  209     for (plane = 0; plane < s->nb_planes; plane++) {

  213         const int src_stride = src->linesize[plane];

  216         const int src_height = s->planeheight[plane];

  219         const int src_width = s->linesize[plane];

  224         if (!(s->process_plane & (1 << plane)))
 
  228         for (y = off; y < src_height; y += 2)
 
  229             memcpy(dstp + 32 + (6 + y) * dst_stride,
 
  230                    srcp + y * src_stride,
 
  234         dstp += (6 + off) * dst_stride;
 
  235         for (y = 6 + off; y < dst_height - 6; y += 2) {
 
  238             for (x = 0; x < 32; x++)
 
  239                 dstp[x] = dstp[64 - x];
 
  241             for (x = dst_width - 32; x < dst_width; x++, c += 2)

  242                 dstp[x] = dstp[x - c];
 
  244             dstp += dst_stride * 2;
 
  248         for (y = off; y < 6; y += 2)
 
  249             memcpy(dstp + y * dst_stride,
 
  250                    dstp + (12 + 2 * off - y) * dst_stride,
 
  253         for (y = dst_height - 6 + off; y < dst_height; y += 2, c += 4)

  254             memcpy(dstp + y * dst_stride,

  255                    dstp + (y - c) * dst_stride,
 
  264     for (i = 0; i < n; i++)

  272     for (i = 0; i < n; i++) {

  275         sum = s->fdsp->scalarproduct_float(data, &weights[i * len], len);

  277         vals[i] = sum * scale[0] + weights[n * len + i];
 
  281 static void dot_prods(NNEDIContext *s, const float *dataf, const float *weightsf, float *vals, const int n, const int len, const float *scale)

  283     const int16_t *data = (int16_t *)dataf;

  284     const int16_t *weights = (int16_t *)weightsf;

  285     const float *wf = (float *)&weights[n * len];

  288     for (i = 0; i < n; i++) {

  289         int sum = 0, off = ((i >> 2) << 3) + (i & 3);

  290         for (j = 0; j < len; j++)

  291             sum += data[j] * weights[i * len + j];

  293         vals[i] = sum * wf[off] * scale[0] + wf[off + 4];
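
A standalone sketch of the two dot-product variants above (names here are illustrative, not the filter's own symbols): the float version lays weights out as n rows of len coefficients followed by n biases, while the quantized version uses int16 data and weights and keeps per-output float scale/bias values after the integer block, indexed with the same ((i >> 2) << 3) + (i & 3) interleaving.

    #include <stdint.h>

    /* Float variant: n rows of len weights, then n biases. */
    static void dot_prod_ref(const float *data, const float *weights,
                             float *vals, int n, int len, float scale)
    {
        for (int i = 0; i < n; i++) {
            float sum = 0.0f;
            for (int j = 0; j < len; j++)
                sum += data[j] * weights[i * len + j];
            vals[i] = sum * scale + weights[n * len + i];
        }
    }

    /* Quantized variant: int16 weights followed by interleaved float
     * (scale, bias) values, as the dot_prods loop above reads them. */
    static void dot_prods_ref(const int16_t *data, const int16_t *weights,
                              float *vals, int n, int len, float scale)
    {
        const float *wf = (const float *)&weights[n * len];
        for (int i = 0; i < n; i++) {
            int off = ((i >> 2) << 3) + (i & 3);
            int sum = 0;
            for (int j = 0; j < len; j++)
                sum += data[j] * weights[i * len + j];
            vals[i] = sum * wf[off] * scale + wf[off + 4];
        }
    }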
 
  299     float t, temp[12], scale = 1.0f;

  316     const float *wf = weightsf + 2 * 48;

  317     float t, temp[12], scale = 1.0f;
 
  337     for (y = 0; y < 4; y++)
 
  338         for (x = 0; x < 12; x++)
 
  339             p[y * 12 + x] = t[y * pitch * 2 + x];
 
  344     int16_t *p = (int16_t *)pf;
 
  347     for (y = 0; y < 4; y++)
 
  348         for (x = 0; x < 12; x++)
 
  349             p[y * 12 + x] = t[y * pitch * 2 + x];
 
  359     for (x = 0; x < width; x++) {
 
  361             int tmp = 19 * (src3p[x + src_pitch * 2] + src3p[x + src_pitch * 4]) - 3 * (src3p[x] + src3p[x + src_pitch * 6]);
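
The expression above is a 4-tap vertical cubic interpolation over same-field neighbours with weights (-3, 19, 19, -3), which sum to 32. A standalone sketch of that fallback interpolation; the rounding and clamping details here are assumptions, not copied from the filter:

    #include <stdint.h>

    static uint8_t cubic4(int far_above, int near_above, int near_below,
                          int far_below, int max_value)
    {
        int tmp = 19 * (near_above + near_below) - 3 * (far_above + far_below);
        tmp = (tmp + 16) >> 5;          /* divide by 32 with rounding (assumed) */
        if (tmp < 0)         tmp = 0;
        if (tmp > max_value) tmp = max_value;
        return (uint8_t)tmp;
    }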
 
  375     int16_t *ps = (int16_t *)p;
 
  378     for (y = 0; y < 4; y++)
 
  379         for (x = 0; x < 16; x++)
 
  380             ps[y * 16 + x] = t[y * pitch * 2 + x];
 
  385     int16_t *data = (int16_t *)datai;
 
  386     int16_t *ws = (int16_t *)weights;
 
  387     float *wf = (float *)&ws[4 * 64];

  391     for (i = 0; i < 4; i++) {

  395         for (j = 0; j < 64; j++)

  396             sum += data[j] * ws[(i << 3) + ((j >> 3) << 5) + (j & 7)];

  397         t = sum * wf[i] + wf[4 + i];

  398         vals[i] = t / (1.0f + FFABS(t));

  401     for (i = 0; i < 4; i++) {

  404         for (j = 0; j < 4; j++)

  405             sum += vals[j] * wf[8 + i + (j << 2)];

  406         vals[4 + i] = sum + wf[8 + 16 + i];

  410     for (i = 0; i < 4; i++) {

  411         if (vals[4 + i] > 0.0f)

  412             mask |= (0x1 << (i << 3));

  415     ((int *)d)[0] = mask;
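
The prescreener above finishes with a softsign ("elliott") activation and then packs the sign of each of the four second-layer outputs into one byte of a 32-bit mask. A minimal sketch of those two steps, with illustrative helper names:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Softsign activation used by the networks: t / (1 + |t|). */
    static float elliott_ref(float t)
    {
        return t / (1.0f + fabsf(t));
    }

    /* Pack four per-neuron decisions into one 32-bit mask, one byte each,
     * mirroring mask |= (0x1 << (i << 3)) above. */
    static uint32_t pack_decisions(const float vals[4])
    {
        uint32_t mask = 0;
        for (int i = 0; i < 4; i++)
            if (vals[i] > 0.0f)
                mask |= 0x1u << (i << 3);
        return mask;
    }

    int main(void)
    {
        const float vals[4] = { 0.7f, -0.2f, 0.01f, -1.5f };
        printf("elliott(2.0) = %f\n", elliott_ref(2.0f));           /* 0.666667 */
        printf("mask = 0x%08x\n", (unsigned)pack_decisions(vals));  /* 0x00010001 */
        return 0;
    }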
 
  427     for (plane = 0; plane < s->nb_planes; plane++) {

  440         if (!(s->process_plane & (1 << plane)))
 
  444             memcpy(dstp + y * dst_stride,
 
  445                    srcp + 32 + (6 + y) * src_stride,
 
  452         srcp += ystart * src_stride;
 
  453         dstp += (ystart - 6) * dst_stride - 32;
 
  454         src3p = srcp - src_stride * 3;
 
  458             for (y = ystart; y < ystop; y += 2) {
 
  459                 for (x = 32; x < width - 32; x++) {

  460                     s->readpixels((const uint8_t *)(src3p + x - 5), src_stride, input);

  463                 lcount[y] += s->process_line0(tempu + 32, width - 64, (uint8_t *)(dstp + 32), (const uint8_t *)(src3p + 32), src_stride, s->max_value, plane);
 
  464                 src3p += src_stride * 2;
 
  465                 dstp += dst_stride * 2;
 
  467         } else if (s->pscrn > 1) {

  468             for (y = ystart; y < ystop; y += 2) {

  469                 for (x = 32; x < width - 32; x += 4) {

  470                     s->readpixels((const uint8_t *)(src3p + x - 6), src_stride, input);

  473                 lcount[y] += s->process_line0(tempu + 32, width - 64, (uint8_t *)(dstp + 32), (const uint8_t *)(src3p + 32), src_stride, s->max_value, plane);
 
  474                 src3p += src_stride * 2;
 
  475                 dstp += dst_stride * 2;
 
  478             for (y = ystart; y < ystop; y += 2) {
 
  480                 lcount[y] += width - 64;
 
  481                 dstp += dst_stride * 2;
 
  495     int64_t sum = 0, sumsq = 0;
 
  498     for (y = 0; y < ydia; y++) {

  501         for (x = 0; x < xdia; x++) {
 
  503             sumsq += (uint32_t)srcpT[x] * (uint32_t)srcpT[x];
 
  509     mstd[0] = sum * scale;
 
  510     tmp = (double)sumsq * scale - (double)mstd[0] * mstd[0];

  512     if (tmp <= FLT_EPSILON)
 
  513         mstd[1] = mstd[2] = 0.0f;
 
  516         mstd[2] = 1.0f / mstd[1];
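
What the neighborhood-statistics code above computes, in isolation: mstd[0] is the window mean, mstd[1] the standard deviation and mstd[2] its reciprocal, with a flat-window guard against division by zero. A simplified sketch over a plain float buffer (the filter itself walks strided 8-bit pixels):

    #include <float.h>
    #include <math.h>

    /* mstd[0] = mean, mstd[1] = stddev, mstd[2] = 1/stddev (0 for flat windows). */
    static void window_stats(const float *p, int n, float mstd[3])
    {
        double sum = 0.0, sumsq = 0.0;
        for (int i = 0; i < n; i++) {
            sum   += p[i];
            sumsq += (double)p[i] * p[i];
        }
        const double scale = 1.0 / n;
        const double mean  = sum * scale;
        const double var   = sumsq * scale - mean * mean; /* E[x^2] - E[x]^2 */
        mstd[0] = (float)mean;
        if (var <= FLT_EPSILON) {
            mstd[1] = mstd[2] = 0.0f;
        } else {
            mstd[1] = (float)sqrt(var);
            mstd[2] = 1.0f / mstd[1];
        }
    }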
 
  522     int16_t *input = (int16_t *)inputf;
 
  524     int sum = 0, sumsq = 0;
 
  527     for (y = 0; y < ydia; y++) {

  529         for (x = 0; x < xdia; x++) {
 
  531             sumsq += srcpT[x] * srcpT[x];
 
  536     scale = 1.0f / (float)(xdia * ydia);
 
  537     mstd[0] = sum * scale;
 
  538     mstd[1] = sumsq * scale - mstd[0] * mstd[0];
 
  540     if (mstd[1] <= FLT_EPSILON)
 
  541         mstd[1] = mstd[2] = 0.0f;
 
  543         mstd[1] = sqrt(mstd[1]);
 
  544         mstd[2] = 1.0f / mstd[1];
 
  556     for (i = 0; i < n; i++)
 
  564     float vsum = 0.0f, wsum = 0.0f;
 
  567     for (i = 0; i < n; i++) {

  568         vsum += w[i] * (w[n + i] / (1.0f + FFABS(w[n + i])));
 
  572         mstd[3] += ((5.0f * vsum) / wsum) * mstd[1] + mstd[0];
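
The fragment above blends the predictor's outputs into a pixel value: the first n outputs act as weights, the second n pass through the softsign nonlinearity, and the weighted average is scaled by 5 and de-normalized with the window statistics (mstd[1] = stddev, mstd[0] = mean). A sketch under that reading; the accumulation of wsum and the small-weight-sum fallback are assumptions based on the surrounding code, and the threshold below is illustrative:

    #include <math.h>

    static void blend_prediction(const float *w, int n, float mstd[4])
    {
        float vsum = 0.0f, wsum = 0.0f;
        for (int i = 0; i < n; i++) {
            vsum += w[i] * (w[n + i] / (1.0f + fabsf(w[n + i])));
            wsum += w[i];                     /* assumed accumulation */
        }
        if (wsum > 1e-10f)                    /* illustrative threshold */
            mstd[3] += ((5.0f * vsum) / wsum) * mstd[1] + mstd[0];
        else
            mstd[3] += mstd[0];               /* assumed fallback to the mean */
    }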
 
  583     const int qual = s->qual;

  584     const int asize = s->asize;

  585     const int nns = s->nns;

  586     const int xdia = s->xdia;

  587     const int xdiad2m1 = (xdia / 2) - 1;

  588     const int ydia = s->ydia;

  589     const float scale = 1.0f / (float)qual;

  592     for (plane = 0; plane < s->nb_planes; plane++) {
 
  603         const int ystop = height - 12;

  606         if (!(s->process_plane & (1 << plane)))
 
  609         srcp += (ystart + 6) * src_stride;
 
  610         dstp += ystart * dst_stride - 32;
 
  611         srcpp = srcp - (ydia - 1) * src_stride - xdiad2m1;
 
  613         for (y = ystart; y < ystop; y += 2) {
 
  614             for (x = 32; x < width - 32; x++) {

  627                 dstp[x] = FFMIN(FFMAX((int)(mstd[3] * scale + 0.5f), 0), s->max_value);
 
  629             srcpp += src_stride * 2;
 
  630             dstp += dst_stride * 2;
 
  640     if (f - floor(f) >= 0.5)

  641         return FFMIN((int)ceil(f), 32767);

  642     return FFMAX((int)floor(f), -32768);
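
roundds thus rounds to the nearest integer (fractional parts of exactly .5 go up) and clamps into the int16_t range; it is used below when quantizing weights. A tiny self-contained check of its behaviour (the reference implementation and values are illustrative):

    #include <math.h>
    #include <stdio.h>

    static int roundds_ref(double f)
    {
        if (f - floor(f) >= 0.5)
            return (int)ceil(f) < 32767 ? (int)ceil(f) : 32767;   /* FFMIN */
        return (int)floor(f) > -32768 ? (int)floor(f) : -32768;   /* FFMAX */
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               roundds_ref(2.3),       /*  2 */
               roundds_ref(2.5),       /*  3 */
               roundds_ref(-2.5),      /* -2: -2.5 - floor(-2.5) == 0.5 */
               roundds_ref(40000.9));  /* 32767 */
        return 0;
    }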
 
  655         if (s->fapprox & 1) {

  671     if (s->fapprox & 2) {

  682 static int modnpf(const int m, const int n)
 
  686     return m + n - (m % n);
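
modnpf rounds m up to a multiple of n and always adds at least one full step when m is already aligned; the padded plane sizing later in the file (see min_alignment and min_pad below) is the likely consumer, though that use is inferred here. A quick check:

    #include <assert.h>

    static int modnpf_ref(int m, int n) { return m + n - (m % n); }

    int main(void)
    {
        assert(modnpf_ref(10, 16) == 16);  /* rounds up to the next multiple */
        assert(modnpf_ref(16, 16) == 32);  /* overshoots when already aligned */
        assert(modnpf_ref(33, 16) == 48);
        return 0;
    }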
 
  695     int effective_field = s->field;
 
  700     if (effective_field > 1)
 
  701         effective_field -= 2;
 
  702     else if (effective_field < 0)
 
  703         effective_field += 2;
 
  705     if (s->field < 0 && src->interlaced_frame && src->top_field_first == 0)

  707     else if (s->field < 0 && src->interlaced_frame && src->top_field_first == 1)

  710         effective_field = !effective_field;

  712     if (s->field > 1 || s->field == -2) {
 
  714             field_n = (effective_field == 0);
 
  716             field_n = (effective_field == 1);
 
  719         field_n = effective_field;
 
  726     s->dst->interlaced_frame = 0;
 
  730     for (plane = 0; plane < s->nb_planes; plane++) {

  731         int dst_height = s->planeheight[plane];

  732         int dst_width = s->linesize[plane];
 
  734         const int min_alignment = 16;
 
  735         const int min_pad = 10;
 
  737         if (!(s->process_plane & (1 << plane))) {

  739                                 src->data[plane], src->linesize[plane],
 
  741                                 s->planeheight[plane]);
 
  802          s->field == -2) && !s->second) {

  804     } else if (s->field > 1 ||
 
  829         s->cur_pts = s->second->pts;

  832         if ((s->deint && src->interlaced_frame &&

  833              !ctx->is_disabled) ||

  834             (!s->deint && !ctx->is_disabled)) {

  839     if ((s->deint && !src->interlaced_frame) || ctx->is_disabled) {

  847         if (s->field > 1 || s->field == -2) {

  849             if ((s->deint && src->interlaced_frame) ||

  870         s->dst->pts = src->pts * 2;

  871     if (s->field <= 1 && s->field > -2) {

  896         next->pts = s->second->pts * 2 - s->cur_pts;
 
  900     } else if (ret < 0) {
 
  911     int64_t expected_size = 13574928;
 
  912     int64_t weights_size;
 
  915     const int xdia_table[NUM_NSIZE] = { 8, 16, 32, 48, 8, 16, 32 };

  916     const int ydia_table[NUM_NSIZE] = { 6, 6, 6, 6, 4, 4, 4 };

  917     const int nns_table[NUM_NNS] = { 16, 32, 64, 128, 256 };
 
  918     const int dims0 = 49 * 4 + 5 * 4 + 9 * 4;
 
  919     const int dims0new = 4 * 65 + 4 * 5;
 
  920     const int dims1 = nns_table[s->nnsparam] * 2 * (xdia_table[s->nsize] * ydia_table[s->nsize] + 1);

  923     int ret = 0, i, j, k;
 
  939     if (weights_size == -1) {
 
  943     } else if (weights_size != expected_size) {

  955     bdata = (float *)av_malloc(expected_size);

  961     bytes_read = fread(bdata, 1, expected_size, weights_file);

  963     if (bytes_read != (size_t)expected_size) {
 
  972     for (j = 0; j < NUM_NNS; j++) {

  974             if (i == s->nsize && j == s->nnsparam)

  975                 dims1offset = dims1tsize;

  976             dims1tsize += nns_table[j] * 2 * (xdia_table[i] * ydia_table[i] + 1) * 2;

  986     for (i = 0; i < 2; i++) {

  988         if (!s->weights1[i]) {
 
  999         double mean[4] = { 0.0, 0.0, 0.0, 0.0 };
 
 1000         int *offt = av_calloc(4 * 64, sizeof(int));
 
 1007         for (j = 0; j < 4; j++)
 
 1008             for (k = 0; k < 64; k++)
 
 1009                 offt[j * 64 + k] = ((k >> 3) << 5) + ((j & 3) << 3) + (k & 7);
 
 1011         bdw = bdata + dims0 + dims0new * (s->pscrn - 2);

 1012         ws = (int16_t *)s->weights0;

 1013         wf = (float *)&ws[4 * 64];
 
 1015         for (j = 0; j < 4; j++) {
 
 1017             for (k = 0; k < 64; k++)
 
 1018                 cmean += bdw[offt[j * 64 + k]];
 
 1019             mean[j] = cmean / 64.0;
 
 1023         for (j = 0; j < 4; j++) {
 
 1024             double scale, mval = 0.0;
 
 1026             for (k = 0; k < 64; k++)
 
 1027                 mval = FFMAX(mval, FFABS((bdw[offt[j * 64 + k]] - mean[j]) / 127.5));
 
 1028             scale = 32767.0 / mval;
 
 1029             for (k = 0; k < 64; k++)
 
 1030                 ws[offt[j * 64 + k]] = roundds(((bdw[offt[j * 64 + k]] - mean[j]) / 127.5) * scale);
 
 1031             wf[j] = (float)(mval / 32767.0);
 
 1033         memcpy(wf + 4, bdw + 4 * 64, (dims0new - 4 * 64) * sizeof(float));
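
The quantization loops above reduce each prescreener neuron's weights to int16: subtract the neuron's mean, divide by 127.5 so 8-bit pixel data and weights live on a similar scale, stretch the largest magnitude onto 32767, and keep the inverse scale as a float for de-quantizing dot products later. A simplified, standalone sketch (helper names are illustrative and lrint stands in for roundds):

    #include <math.h>
    #include <stdint.h>

    static void quantize_row(const double *w, int len,
                             int16_t *q, float *dequant_scale)
    {
        double mean = 0.0, mval = 0.0;

        for (int k = 0; k < len; k++)
            mean += w[k];
        mean /= len;

        for (int k = 0; k < len; k++)
            mval = fmax(mval, fabs((w[k] - mean) / 127.5));
        if (mval == 0.0)
            mval = 1.0;  /* guard for a degenerate all-equal row (sketch only) */

        const double scale = 32767.0 / mval;
        for (int k = 0; k < len; k++)
            q[k] = (int16_t)lrint(((w[k] - mean) / 127.5) * scale);

        /* Multiply back after the integer dot product. */
        *dequant_scale = (float)(mval / 32767.0);
    }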
 
 1036         double mean[4] = { 0.0, 0.0, 0.0, 0.0 };
 
 1038         for (j = 0; j < 4; j++) {
 
 1040             for (k = 0; k < 48; k++)
 
 1041                 cmean += bdata[j * 48 + k];
 
 1042             mean[j] = cmean / 48.0;
 
 1044         if (s->fapprox & 1) {

 1045             int16_t *ws = (int16_t *)s->weights0;

 1046             float *wf = (float *)&ws[4 * 48];
 
 1049             for (j = 0; j < 4; j++) {
 
 1050                 double scale, mval = 0.0;
 
 1051                 for (k = 0; k < 48; k++)
 
 1052                     mval = FFMAX(mval, FFABS((bdata[j * 48 + k] - mean[j]) / 127.5));
 
 1053                 scale = 32767.0 / mval;
 
 1054                 for (k = 0; k < 48; k++)
 
 1055                     ws[j * 48 + k] = roundds(((bdata[j * 48 + k] - mean[j]) / 127.5) * scale);
 
 1056                 wf[j] = (float)(mval / 32767.0);
 
 1058             memcpy(wf + 4, bdata + 4 * 48, (dims0 - 4 * 48) * sizeof(float));
 
 1060             double half = (1 << 8) - 1;
 
 1066             for (j = 0; j < 4; j++)
 
 1067                 for (k = 0; k < 48; k++)
 
 1068                     s->weights0[j * 48 + k] = (float)((bdata[j * 48 + k] - mean[j]) / half);

 1069             memcpy(s->weights0 + 4 * 48, bdata + 4 * 48, (dims0 - 4 * 48) * sizeof(float));
 
 1074     for (i = 0; i < 2; i++) {

 1075         const float *bdataT = bdata + dims0 + dims0new * 3 + dims1tsize * s->etype + dims1offset + i * dims1;

 1076         const int nnst = nns_table[s->nnsparam];

 1077         const int asize = xdia_table[s->nsize] * ydia_table[s->nsize];

 1078         const int boff = nnst * 2 * asize;

 1079         double *mean = (double *)av_calloc(asize + 1 + nnst * 2, sizeof(double));
 
 1087         for (j = 0; j < nnst * 2; j++) {
 
 1089             for (k = 0; k < asize; k++)

 1090                 cmean += bdataT[j * asize + k];

 1094         for (j = 0; j < nnst; j++) {

 1095             for (k = 0; k < asize; k++)

 1096                 mean[k] += bdataT[j * asize + k] - mean[asize + 1 + j];

 1097             mean[asize] += bdataT[boff + j];

 1099         for (j = 0; j < asize + 1; j++)

 1100             mean[j] /= (double)(nnst);
 
 1102         if (s->fapprox & 2) {

 1103             int16_t *ws = (int16_t *)s->weights1[i];

 1104             float *wf = (float *)&ws[nnst * 2 * asize];
 
 1107             for (j = 0; j < nnst; j++) { 
 
 1108                 double scale, mval = 0.0;
 
 1109                 for (k = 0; k < asize; k++)
 
 1111                 scale = 32767.0 / mval;
 
 1112                 for (k = 0; k < asize; k++)
 
 1114                 wf[(j >> 2) * 8 + (j & 3)] = (float)(mval / 32767.0);
 
 1115                 wf[(j >> 2) * 8 + (j & 3) + 4] = (float)(bdataT[boff + j] - mean[asize]);
 
 1117             for (j = nnst; j < nnst * 2; j++) { 
 
 1118                 double scale, mval = 0.0;
 
 1119                 for (k = 0; k < asize; k++)
 
 1121                 scale = 32767.0 / mval;
 
 1122                 for (k = 0; k < asize; k++)
 
 1124                 wf[(j >> 2) * 8 + (j & 3)] = (float)(mval / 32767.0);
 
 1125                 wf[(j >> 2) * 8 + (j & 3) + 4] = bdataT[boff + j];
 
 1130             for (j = 0; j < nnst * 2; j++) {
 
 1131                 for (k = 0; k < asize; k++) {
 
 1132                     const double q = j < nnst ? mean[k] : 0.0;
 
 1133                     s->weights1[i][j * asize + k] = (float)(bdataT[j * asize + k] - mean[asize + 1 + j] - q);

 1135                 s->weights1[i][boff + j] = (float)(bdataT[boff + j] - (j < nnst ? mean[asize] : 0.0));
 
 1141     s->nns = nns_table[s->nnsparam];

 1142     s->xdia = xdia_table[s->nsize];

 1143     s->ydia = ydia_table[s->nsize];

 1144     s->asize = xdia_table[s->nsize] * ydia_table[s->nsize];
 
 1146     s->max_value = 65535 >> 8;
 
 1166     for (i = 0; i < 2; i++)

 1169     for (i = 0; i < s->nb_planes; i++) {
 
 1202     .description   = NULL_IF_CONFIG_SMALL("Apply neural network edge directed interpolation intra-only deinterlacer."),
 
 1204     .priv_class    = &nnedi_class,
 
  