83 #define OFFSET(x) offsetof(FrameRateContext, x)
84 #define V AV_OPT_FLAG_VIDEO_PARAM
85 #define F AV_OPT_FLAG_FILTERING_PARAM
86 #define FRAMERATE_FLAG_SCD 01
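/* Note: OFFSET(), V and F are the usual AVOption table helpers (field offset into
 * FrameRateContext plus the video/filtering option flags). FRAMERATE_FLAG_SCD appears
 * to be the flag bit that enables scene-change detection, gating the scene-score
 * checks used in blend_frames8()/blend_frames16() below. */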
91 {"interp_start", "point to start linear interpolation", OFFSET(interp_start), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, V|F },
92 {"interp_end",   "point to end linear interpolation",   OFFSET(interp_end),   AV_OPT_TYPE_INT, {.i64=240}, 0, 255, V|F },
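/* interp_start/interp_end bound the window, on the same 0..255 scale as the
 * interpolation coefficient logged below as "%f/256", in which two source frames are
 * actually blended. Outside that window (i.e. when an output frame lands very close
 * to a source frame) the nearer source frame is copied rather than blended; see the
 * interp_start/interp_end test around source line 404. */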
109 ff_dlog(ctx, "next_source()\n");
112 ff_dlog(ctx, "next_source() unlink %d\n", s->last);
115 for (i = s->last; i > s->frst; i--) {
116 ff_dlog(ctx, "next_source() copy %d to %d\n", i - 1, i);
119 ff_dlog(ctx, "next_source() make %d null\n", s->frst);
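/* next_source() ages the buffered frames: the frame in the s->last slot is freed
 * ("unlink"), every remaining frame is shifted one slot towards s->last, and the
 * s->frst slot is cleared so the next incoming frame can be stored there. */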
124 const uint16_t *src2, ptrdiff_t stride2)
129 for (y = 0; y < 8; y++) {
130 for (x = 0; x < 8; x++)
131 sum += FFABS(src1[x] - src2[x]);
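/* sad_8x8_16(): plain C sum of absolute differences over one 8x8 block of 16-bit
 * samples; the strides passed in are in uint16_t units (note the linesize[0] / 2 in
 * get_scene_score16() below). */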
143 ff_dlog(ctx, "get_scene_score16()\n");
151 const uint16_t *p1 = (const uint16_t *)crnt->data[0];
152 const uint16_t *p2 = (const uint16_t *)next->data[0];
153 const int p1_linesize = crnt->linesize[0] / 2;
154 const int p2_linesize = next->linesize[0] / 2;
156 ff_dlog(ctx, "get_scene_score16() process\n");
158 for (sad = y = 0; y < crnt->height; y += 8) {
159 for (x = 0; x < p1_linesize; x += 8) {
162 p2 + y * p2_linesize + x,
168 ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
171 ff_dlog(ctx, "get_scene_score16() result is:%f\n", ret);
180 ff_dlog(ctx, "get_scene_score()\n");
190 const int p1_linesize = crnt->linesize[0];
191 const int p2_linesize = next->linesize[0];
193 ff_dlog(ctx, "get_scene_score() process\n");
195 for (sad = y = 0; y < crnt->height; y += 8) {
196 for (x = 0; x < p1_linesize; x += 8) {
197 sad += s->sad(p1 + y * p1_linesize + x,
199 p2 + y * p2_linesize + x,
206 ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
209 ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
218 double interpolate_scene_score = 0;
222 ff_dlog(ctx, "blend_frames16() interpolate scene score:%f\n", interpolate_scene_score);
225 if (interpolate_scene_score < s->scene_score && copy_src2) {
226 uint16_t src2_factor = fabsf(interpolate) * (1 << (s->bitdepth - 8));
227 uint16_t src1_factor = s->max - src2_factor;
228 const int half = s->max / 2;
229 const int uv = (s->max + 1) * half;
240 ff_dlog(ctx, "blend_frames16() INTERPOLATE to create work frame\n");
241 for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
243 const uint16_t *cpy_src1_data = (const uint16_t *)copy_src1->data[plane];
244 int cpy_src1_line_size = copy_src1->linesize[plane] / 2;
245 const uint16_t *cpy_src2_data = (const uint16_t *)copy_src2->data[plane];
247 int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
248 uint16_t *cpy_dst_data = (uint16_t *)s->work->data[plane];
251 if (plane < 1 || plane > 2) {
253 for (line = 0; line < cpy_src_h; line++) {
254 for (pixel = 0; pixel < cpy_line_width; pixel++)
255 cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + half) >> shift;
256 cpy_src1_data += cpy_src1_line_size;
257 cpy_src2_data += cpy_src2_line_size;
258 cpy_dst_data += cpy_dst_line_size;
262 for (line = 0; line < cpy_src_h; line++) {
263 for (pixel = 0; pixel < cpy_line_width; pixel++) {
264 cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - half) * src1_factor) + ((cpy_src2_data[pixel] - half) * src2_factor) + uv) >> shift;
266 cpy_src1_data += cpy_src1_line_size;
267 cpy_src2_data += cpy_src2_line_size;
268 cpy_dst_data += cpy_dst_line_size;
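/* 16-bit blend: |interpolate| (0..256) is rescaled to the frame's bit depth, and
 * src1_factor + src2_factor always sum to s->max, so each output sample is a weighted
 * average of the two sources; "+ half" rounds before the final shift (set from the
 * bit depth elsewhere in the function, not shown here). For the chroma planes (1 and
 * 2) the samples are first re-centred around half (mid grey), blended, and the uv
 * constant restores the bias with rounding. */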
282 double interpolate_scene_score = 0;
286 ff_dlog(ctx, "blend_frames8() interpolate scene score:%f\n", interpolate_scene_score);
289 if (interpolate_scene_score < s->scene_score && copy_src2) {
290 uint16_t src2_factor = fabsf(interpolate);
291 uint16_t src1_factor = 256 - src2_factor;
301 ff_dlog(ctx, "blend_frames8() INTERPOLATE to create work frame\n");
302 for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
308 int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
311 if (plane < 1 || plane > 2) {
313 for (line = 0; line < cpy_src_h; line++) {
314 for (pixel = 0; pixel < cpy_line_width; pixel++) {
318 cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + 128) >> 8;
320 cpy_src1_data += cpy_src1_line_size;
321 cpy_src2_data += cpy_src2_line_size;
322 cpy_dst_data += cpy_dst_line_size;
326 for (line = 0; line < cpy_src_h; line++) {
327 for (pixel = 0; pixel < cpy_line_width; pixel++) {
331 cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - 128) * src1_factor) + ((cpy_src2_data[pixel] - 128) * src2_factor) + 32896) >> 8;
333 cpy_src1_data += cpy_src1_line_size;
334 cpy_src2_data += cpy_src2_line_size;
335 cpy_dst_data += cpy_dst_line_size;
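/* 8-bit blend: the two factors sum to 256, "+ 128" rounds before the >> 8, and for
 * chroma the constant 32896 (= 256*128 + 128) both restores the 128 chroma bias and
 * rounds. Quick check with src1 = src2 = 128 and any factor split:
 * ((0 * f1) + (0 * f2) + 32896) >> 8 == 128, i.e. neutral chroma stays neutral. */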
347 int64_t work_next_pts;
351 ff_dlog(ctx, "process_work_frame()\n");
361 ff_dlog(ctx, "process_work_frame() no current frame cached: move on to next frame, do not output a frame\n");
368 ff_dlog(ctx, "process_work_frame() work crnt pts:%"PRId64"\n", s->pts);
369 ff_dlog(ctx, "process_work_frame() work next pts:%"PRId64"\n", work_next_pts);
371 ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64" at dest time base:%u/%u\n",
374 ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64" at dest time base:%u/%u\n",
377 ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64" at dest time base:%u/%u\n",
384 ff_dlog(ctx, "process_work_frame() work crnt pts >= srce next pts: SKIP FRAME, move on to next frame, do not output a frame\n");
392 ff_dlog(ctx, "process_work_frame() interpolate:%f/256\n", interpolate);
395 ff_dlog(ctx, "process_work_frame() source is:NEXT\n");
398 if (s->srce[s->prev] && interpolate < -s->interp_end) {
399 ff_dlog(ctx, "process_work_frame() source is:PREV\n");
404 if ((interpolate >= s->interp_start && interpolate <= s->interp_end) || (interpolate <= -s->interp_start && interpolate >= -s->interp_end)) {
407 if (interpolate > 0) {
408 ff_dlog(ctx, "process_work_frame() interpolate source is:NEXT\n");
411 ff_dlog(ctx, "process_work_frame() interpolate source is:PREV\n");
414 if (s->blend_frames(ctx, interpolate, copy_src1, copy_src2))
417 ff_dlog(ctx, "process_work_frame() CUT - DON'T INTERPOLATE\n");
420 ff_dlog(ctx, "process_work_frame() COPY to the work frame\n");
431 ff_dlog(ctx, "process_work_frame() REPEAT FRAME\n");
433 ff_dlog(ctx, "process_work_frame() CONSUME FRAME, move to next frame\n");
437 ff_dlog(ctx, "process_work_frame() output a frame\n");
450 ff_dlog(ctx, "set_srce_frame_output_pts()\n");
464 int64_t pts, average_srce_pts_delta = 0;
466 ff_dlog(ctx, "set_work_frame_pts()\n");
471 ff_dlog(ctx, "set_work_frame_pts() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
472 ff_dlog(ctx, "set_work_frame_pts() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
474 ff_dlog(ctx, "set_work_frame_pts() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
477 ff_dlog(ctx, "set_work_frame_pts() initial average srce pts:%"PRId64"\n", average_srce_pts_delta);
483 average_srce_pts_delta = average_srce_pts_delta ? ((average_srce_pts_delta + pts) >> 1) : pts;
485 average_srce_pts_delta = average_srce_pts_delta ? ((average_srce_pts_delta + pts) >> 1) : pts;
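/* Running average of the source pts deltas: pts here holds the delta between
 * neighbouring source frames (computed on the lines not shown above). The first delta
 * seeds the average, after that each new delta is mixed in 50/50 via
 * (average + pts) >> 1; for example, deltas of 3600 and then 3602 give 3600, then
 * (3600 + 3602) >> 1 = 3601. */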
489 ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64"\n", average_srce_pts_delta);
490 ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64" at dest time base:%u/%u\n",
505 ff_dlog(ctx, "set_work_frame_pts() calculated pts:%"PRId64" at dest time base:%u/%u\n",
529 for (i = s->frst; i < s->last; i++) {
564 for (plane = 0; plane < 4; plane++) {
622 ff_dlog(ctx, "config_output()\n");
625 "config_output() input time base:%u/%u (%f)\n",
637 "time base:%u/%u -> %u/%u exact:%d\n",
648 "config_output() output time base:%u/%u (%f) w:%d h:%d\n",
651 outlink->w, outlink->h);
654 av_log(ctx, AV_LOG_INFO, "fps -> fps:%u/%u scene score:%f interpolate start:%d end:%d\n",
667 ff_dlog(ctx, "request_frame()\n");
673 ff_dlog(ctx, "request_frame() REPEAT or FLUSH\n");
676 ff_dlog(ctx, "request_frame() nothing else to do, return:EOF\n");
681 ff_dlog(ctx, "request_frame() FLUSH\n");
684 for (i = s->last; i > s->frst; i--) {
685 if (!s->srce[i - 1] && s->srce[i]) {
686 ff_dlog(ctx, "request_frame() copy:%d to:%d\n", i, i - 1);
699 ff_dlog(ctx, "request_frame() call source's request_frame()\n");
702 ff_dlog(ctx, "request_frame() source's request_frame() returned error:%d\n", ret);
707 ff_dlog(ctx, "request_frame() source's request_frame() returned:%d\n", ret);
733 .description = NULL_IF_CONFIG_SMALL("Upsamples or downsamples progressive source between specified frame rates."),
735 .priv_class = &framerate_class,
739 .inputs = framerate_inputs,
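/* Typical use of the filter from the ffmpeg command line (illustrative example, not
 * part of this file), converting a source to 60 fps with the default interpolation
 * window and scene-change threshold:
 *
 *     ffmpeg -i input.mp4 -filter:v framerate=fps=60 output.mp4
 */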