FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/display.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/time.h"
40 #include "libavutil/timestamp.h"
41 
42 // FIXME private header, used for mid_pred()
43 #include "libavcodec/mathops.h"
44 
45 typedef struct FilterGraphPriv {
47 
48  // name used for logging
49  char log_name[32];
50 
51  int is_simple;
52  // true when the filtergraph contains only meta filters
53  // that do not modify the frame data
54  int is_meta;
55  // source filters are present in the graph
58 
59  unsigned nb_outputs_done;
60 
61  const char *graph_desc;
62 
63  // frame for temporarily holding output from the filtergraph
65  // frame for sending output to the encoder
67 
69  unsigned sch_idx;
71 
73 {
74  return (FilterGraphPriv*)fg;
75 }
76 
77 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
78 {
79  return (const FilterGraphPriv*)fg;
80 }
81 
82 // data that is local to the filter thread and not visible outside of it
83 typedef struct FilterGraphThread {
85 
87 
88  // Temporary buffer for output frames, since on filtergraph reset
89  // we cannot send them to encoders immediately.
90  // The output index is stored in frame opaque.
92 
93  // index of the next input to request from the scheduler
94  unsigned next_in;
95  // set to 1 after at least one frame passed through this output
96  int got_frame;
97 
98  // EOF status of each input/output, as received by the thread
99  uint8_t *eof_in;
100  uint8_t *eof_out;
102 
103 typedef struct InputFilterPriv {
105 
107 
108  int index;
109 
111 
112  // used to hold submitted input
114 
115  /* for filters that are not yet bound to an input stream,
116  * this stores the input linklabel, if any */
117  uint8_t *linklabel;
118 
119  // filter data type
121  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
122  // same as type otherwise
124 
125  int eof;
126  int bound;
127 
128  // parameters configured for this input
129  int format;
130 
131  int width, height;
135 
138 
140 
142 
144 
148 
149  struct {
150  AVFrame *frame;
151 
152  int64_t last_pts;
153  int64_t end_pts;
154 
155  ///< marks if sub2video_update should force an initialization
156  unsigned int initialize;
157  } sub2video;
159 
161 {
162  return (InputFilterPriv*)ifilter;
163 }
164 
165 typedef struct FPSConvContext {
167  /* number of frames emitted by the video-encoding sync code */
168  int64_t frame_number;
169  /* history of nb_frames_prev, i.e. the number of times the
170  * previous frame was duplicated by vsync code in recent
171  * do_video_out() calls */
172  int64_t frames_prev_hist[3];
173 
174  uint64_t dup_warning;
175 
178 
184 
185 typedef struct OutputFilterPriv {
187 
188  int index;
189 
191 
192  /* desired output stream properties */
193  int format;
194  int width, height;
197 
198  // time base in which the output is sent to our downstream
199  // does not need to match the filtersink's timebase
201  // at least one frame with the above timebase was sent
202  // to our downstream, so it cannot change anymore
204 
206 
207  // those are only set if no format is specified and the encoder gives us multiple options
208  // They point directly to the relevant lists of the encoder.
209  const int *formats;
211  const int *sample_rates;
212 
214  // offset for output timestamps, in AV_TIME_BASE_Q
215  int64_t ts_offset;
216  int64_t next_pts;
219 
221 {
222  return (OutputFilterPriv*)ofilter;
223 }
224 
225 typedef struct FilterCommand {
226  char *target;
227  char *command;
228  char *arg;
229 
230  double time;
232 } FilterCommand;
233 
234 static void filter_command_free(void *opaque, uint8_t *data)
235 {
237 
238  av_freep(&fc->target);
239  av_freep(&fc->command);
240  av_freep(&fc->arg);
241 
242  av_free(data);
243 }
244 
246 {
247  AVFrame *frame = ifp->sub2video.frame;
248  int ret;
249 
251 
252  frame->width = ifp->width;
253  frame->height = ifp->height;
254  frame->format = ifp->format;
255  frame->colorspace = ifp->color_space;
256  frame->color_range = ifp->color_range;
257 
259  if (ret < 0)
260  return ret;
261 
262  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
263 
264  return 0;
265 }
266 
267 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
268  AVSubtitleRect *r)
269 {
270  uint32_t *pal, *dst2;
271  uint8_t *src, *src2;
272  int x, y;
273 
274  if (r->type != SUBTITLE_BITMAP) {
275  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
276  return;
277  }
278  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
279  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
280  r->x, r->y, r->w, r->h, w, h
281  );
282  return;
283  }
284 
285  dst += r->y * dst_linesize + r->x * 4;
286  src = r->data[0];
287  pal = (uint32_t *)r->data[1];
288  for (y = 0; y < r->h; y++) {
289  dst2 = (uint32_t *)dst;
290  src2 = src;
291  for (x = 0; x < r->w; x++)
292  *(dst2++) = pal[*(src2++)];
293  dst += dst_linesize;
294  src += r->linesize[0];
295  }
296 }
297 
298 static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
299 {
300  AVFrame *frame = ifp->sub2video.frame;
301  int ret;
302 
303  av_assert1(frame->data[0]);
304  ifp->sub2video.last_pts = frame->pts = pts;
308  if (ret != AVERROR_EOF && ret < 0)
309  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
310  av_err2str(ret));
311 }
312 
313 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
314  const AVSubtitle *sub)
315 {
316  AVFrame *frame = ifp->sub2video.frame;
317  int8_t *dst;
318  int dst_linesize;
319  int num_rects;
320  int64_t pts, end_pts;
321 
322  if (sub) {
323  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
324  AV_TIME_BASE_Q, ifp->time_base);
325  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
326  AV_TIME_BASE_Q, ifp->time_base);
327  num_rects = sub->num_rects;
328  } else {
329  /* If we are initializing the system, utilize current heartbeat
330  PTS as the start time, and show until the following subpicture
331  is received. Otherwise, utilize the previous subpicture's end time
332  as the fall-back value. */
333  pts = ifp->sub2video.initialize ?
334  heartbeat_pts : ifp->sub2video.end_pts;
335  end_pts = INT64_MAX;
336  num_rects = 0;
337  }
338  if (sub2video_get_blank_frame(ifp) < 0) {
340  "Impossible to get a blank canvas.\n");
341  return;
342  }
343  dst = frame->data [0];
344  dst_linesize = frame->linesize[0];
345  for (int i = 0; i < num_rects; i++)
346  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
347  sub2video_push_ref(ifp, pts);
348  ifp->sub2video.end_pts = end_pts;
349  ifp->sub2video.initialize = 0;
350 }
351 
352 /* *dst may return be set to NULL (no pixel format found), a static string or a
353  * string backed by the bprint. Nothing has been written to the AVBPrint in case
354  * NULL is returned. The AVBPrint provided should be clean. */
355 static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
356  const char **dst)
357 {
358  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
359  OutputStream *ost = ofilter->ost;
360 
361  *dst = NULL;
362 
363  if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
364  *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
366  } else if (ofp->formats) {
367  const enum AVPixelFormat *p = ofp->formats;
368 
369  for (; *p != AV_PIX_FMT_NONE; p++) {
370  const char *name = av_get_pix_fmt_name(*p);
371  av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
372  }
373  if (!av_bprint_is_complete(bprint))
374  return AVERROR(ENOMEM);
375 
376  *dst = bprint->str;
377  }
378 
379  return 0;
380 }
381 
/* Define a function for appending a list of allowed formats to an AVBPrint.
 * If nonempty, the list will have a "<name>=" header and a trailing ':'
 * separator.
 *
 * The generated function
 *     static void choose_<name>(OutputFilterPriv *ofp, AVBPrint *bprint)
 * emits nothing when neither an explicit value (ofp-><var> != none) nor an
 * encoder-supplied list (ofp-><supported_list>) is available; otherwise it
 * writes either the single explicit value, or the supported values joined
 * with '|' (the loop's trailing '|' is truncated before the ':' is added).
 * get_name maps a value to its printable form, printf_format formats it. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint)       \
{                                                                           \
    if (ofp->var == none && !ofp->supported_list)                           \
        return;                                                             \
    av_bprintf(bprint, #name "=");                                          \
    if (ofp->var != none) {                                                 \
        av_bprintf(bprint, printf_format, get_name(ofp->var));              \
    } else {                                                                \
        const type *p;                                                      \
                                                                            \
        for (p = ofp->supported_list; *p != none; p++) {                    \
            av_bprintf(bprint, printf_format "|", get_name(*p));            \
        }                                                                   \
        if (bprint->len > 0)                                                \
            bprint->str[--bprint->len] = '\0';                              \
    }                                                                       \
    av_bprint_chars(bprint, ':', 1);                                        \
}
403 
404 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
405 // GET_PIX_FMT_NAME)
406 
409 
411  "%d", )
412 
413 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
414 {
415  if (av_channel_layout_check(&ofp->ch_layout)) {
416  av_bprintf(bprint, "channel_layouts=");
417  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
418  } else if (ofp->ch_layouts) {
419  const AVChannelLayout *p;
420 
421  av_bprintf(bprint, "channel_layouts=");
422  for (p = ofp->ch_layouts; p->nb_channels; p++) {
424  av_bprintf(bprint, "|");
425  }
426  if (bprint->len > 0)
427  bprint->str[--bprint->len] = '\0';
428  } else
429  return;
430  av_bprint_chars(bprint, ':', 1);
431 }
432 
433 static int read_binary(const char *path, uint8_t **data, int *len)
434 {
435  AVIOContext *io = NULL;
436  int64_t fsize;
437  int ret;
438 
439  *data = NULL;
440  *len = 0;
441 
442  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
443  if (ret < 0) {
444  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
445  path, av_err2str(ret));
446  return ret;
447  }
448 
449  fsize = avio_size(io);
450  if (fsize < 0 || fsize > INT_MAX) {
451  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
452  ret = AVERROR(EIO);
453  goto fail;
454  }
455 
456  *data = av_malloc(fsize);
457  if (!*data) {
458  ret = AVERROR(ENOMEM);
459  goto fail;
460  }
461 
462  ret = avio_read(io, *data, fsize);
463  if (ret != fsize) {
464  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
465  ret = ret < 0 ? ret : AVERROR(EIO);
466  goto fail;
467  }
468 
469  *len = fsize;
470 
471  ret = 0;
472 fail:
473  avio_close(io);
474  if (ret < 0) {
475  av_freep(data);
476  *len = 0;
477  }
478  return ret;
479 }
480 
481 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
482 {
483  const AVOption *o = NULL;
484  int ret;
485 
487  if (ret >= 0)
488  return 0;
489 
490  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
492  if (!o)
493  goto err_apply;
494 
495  // key is a valid option name prefixed with '/'
496  // interpret value as a path from which to load the actual option value
497  key++;
498 
499  if (o->type == AV_OPT_TYPE_BINARY) {
500  uint8_t *data;
501  int len;
502 
503  ret = read_binary(val, &data, &len);
504  if (ret < 0)
505  goto err_load;
506 
508  av_freep(&data);
509  } else {
510  char *data = file_read(val);
511  if (!data) {
512  ret = AVERROR(EIO);
513  goto err_load;
514  }
515 
517  av_freep(&data);
518  }
519  if (ret < 0)
520  goto err_apply;
521 
522  return 0;
523 
524 err_apply:
526  "Error applying option '%s' to filter '%s': %s\n",
527  key, f->filter->name, av_err2str(ret));
528  return ret;
529 err_load:
531  "Error loading value for option '%s' from file '%s'\n",
532  key, val);
533  return ret;
534 }
535 
537 {
538  for (size_t i = 0; i < seg->nb_chains; i++) {
539  AVFilterChain *ch = seg->chains[i];
540 
541  for (size_t j = 0; j < ch->nb_filters; j++) {
542  AVFilterParams *p = ch->filters[j];
543  const AVDictionaryEntry *e = NULL;
544 
545  av_assert0(p->filter);
546 
547  while ((e = av_dict_iterate(p->opts, e))) {
548  int ret = filter_opt_apply(p->filter, e->key, e->value);
549  if (ret < 0)
550  return ret;
551  }
552 
553  av_dict_free(&p->opts);
554  }
555  }
556 
557  return 0;
558 }
559 
560 static int graph_parse(AVFilterGraph *graph, const char *desc,
562  AVBufferRef *hw_device)
563 {
565  int ret;
566 
567  *inputs = NULL;
568  *outputs = NULL;
569 
570  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
571  if (ret < 0)
572  return ret;
573 
575  if (ret < 0)
576  goto fail;
577 
578  if (hw_device) {
579  for (int i = 0; i < graph->nb_filters; i++) {
580  AVFilterContext *f = graph->filters[i];
581 
582  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
583  continue;
584  f->hw_device_ctx = av_buffer_ref(hw_device);
585  if (!f->hw_device_ctx) {
586  ret = AVERROR(ENOMEM);
587  goto fail;
588  }
589  }
590  }
591 
592  ret = graph_opts_apply(seg);
593  if (ret < 0)
594  goto fail;
595 
597 
598 fail:
600  return ret;
601 }
602 
603 // Filters can be configured only if the formats of all inputs are known.
605 {
606  for (int i = 0; i < fg->nb_inputs; i++) {
608  if (ifp->format < 0)
609  return 0;
610  }
611  return 1;
612 }
613 
614 static int filter_thread(void *arg);
615 
616 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
617 {
618  AVFilterContext *ctx = inout->filter_ctx;
619  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
620  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
621 
622  if (nb_pads > 1)
623  return av_strdup(ctx->filter->name);
624  return av_asprintf("%s:%s", ctx->filter->name,
625  avfilter_pad_get_name(pads, inout->pad_idx));
626 }
627 
629 {
630  OutputFilterPriv *ofp;
631  OutputFilter *ofilter;
632 
633  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
634  if (!ofp)
635  return NULL;
636 
637  ofilter = &ofp->ofilter;
638  ofilter->graph = fg;
639  ofp->format = -1;
640  ofp->index = fg->nb_outputs - 1;
641 
642  return ofilter;
643 }
644 
645 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
646 {
647  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
648  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
649  int ret, dec_idx;
650 
651  av_assert0(!ifp->bound);
652  ifp->bound = 1;
653 
654  if (ifp->type != ist->par->codec_type &&
656  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
658  return AVERROR(EINVAL);
659  }
660 
661  ifp->type_src = ist->st->codecpar->codec_type;
662 
663  ifp->opts.fallback = av_frame_alloc();
664  if (!ifp->opts.fallback)
665  return AVERROR(ENOMEM);
666 
667  dec_idx = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
668  &ifp->opts);
669  if (dec_idx < 0)
670  return dec_idx;
671 
672  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
673  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
674  if (ret < 0)
675  return ret;
676 
677  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
678  ifp->sub2video.frame = av_frame_alloc();
679  if (!ifp->sub2video.frame)
680  return AVERROR(ENOMEM);
681 
682  ifp->width = ifp->opts.sub2video_width;
683  ifp->height = ifp->opts.sub2video_height;
684 
685  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
686  palettes for all rectangles are identical or compatible */
687  ifp->format = AV_PIX_FMT_RGB32;
688 
689  ifp->time_base = AV_TIME_BASE_Q;
690 
691  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
692  ifp->width, ifp->height);
693  }
694 
695  return 0;
696 }
697 
699 {
701  int ret, dec_idx;
702 
703  av_assert0(!ifp->bound);
704  ifp->bound = 1;
705 
706  if (ifp->type != dec->type) {
707  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
709  return AVERROR(EINVAL);
710  }
711 
712  ifp->type_src = ifp->type;
713 
714  dec_idx = dec_filter_add(dec, &ifp->ifilter, &ifp->opts);
715  if (dec_idx < 0)
716  return dec_idx;
717 
718  ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
719  SCH_FILTER_IN(fgp->sch_idx, ifp->index));
720  if (ret < 0)
721  return ret;
722 
723  return 0;
724 }
725 
727 {
728  const AVCodec *c = ost->enc_ctx->codec;
729  int i, err;
730 
731  if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
732  /* Pass the layout through for all orders but UNSPEC */
733  err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
734  if (err < 0)
735  return err;
736  return 0;
737  }
738 
739  /* Requested layout is of order UNSPEC */
740  if (!c->ch_layouts) {
741  /* Use the default native layout for the requested amount of channels when the
742  encoder doesn't have a list of supported layouts */
743  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
744  return 0;
745  }
746  /* Encoder has a list of supported layouts. Pick the first layout in it with the
747  same amount of channels as the requested layout */
748  for (i = 0; c->ch_layouts[i].nb_channels; i++) {
749  if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
750  break;
751  }
752  if (c->ch_layouts[i].nb_channels) {
753  /* Use it if one is found */
754  err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
755  if (err < 0)
756  return err;
757  return 0;
758  }
759  /* If no layout for the amount of channels requested was found, use the default
760  native layout for it. */
761  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
762 
763  return 0;
764 }
765 
767  unsigned sched_idx_enc)
768 {
769  const OutputFile *of = ost->file;
770  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
771  FilterGraph *fg = ofilter->graph;
772  FilterGraphPriv *fgp = fgp_from_fg(fg);
773  const AVCodec *c = ost->enc_ctx->codec;
774  int ret;
775 
776  av_assert0(!ofilter->ost);
777 
778  ofilter->ost = ost;
779  av_freep(&ofilter->linklabel);
780 
781  ofp->ts_offset = of->start_time == AV_NOPTS_VALUE ? 0 : of->start_time;
782  ofp->enc_timebase = ost->enc_timebase;
783 
784  switch (ost->enc_ctx->codec_type) {
785  case AVMEDIA_TYPE_VIDEO:
786  ofp->width = ost->enc_ctx->width;
787  ofp->height = ost->enc_ctx->height;
788  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
789  ofp->format = ost->enc_ctx->pix_fmt;
790  } else {
791  ofp->formats = c->pix_fmts;
792 
793  // MJPEG encoder exports a full list of supported pixel formats,
794  // but the full-range ones are experimental-only.
795  // Restrict the auto-conversion list unless -strict experimental
796  // has been specified.
797  if (!strcmp(c->name, "mjpeg")) {
798  // FIXME: YUV420P etc. are actually supported with full color range,
799  // yet the latter information isn't available here.
800  static const enum AVPixelFormat mjpeg_formats[] =
802  AV_PIX_FMT_NONE };
803 
804  const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
805  int strict_val = ost->enc_ctx->strict_std_compliance;
806 
807  if (strict) {
808  const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
809  av_assert0(o);
810  av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
811  }
812 
813  if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
814  ofp->formats = mjpeg_formats;
815  }
816  }
817 
818  fgp->disable_conversions |= ost->keep_pix_fmt;
819 
820  ofp->fps.last_frame = av_frame_alloc();
821  if (!ofp->fps.last_frame)
822  return AVERROR(ENOMEM);
823 
824  ofp->fps.framerate = ost->frame_rate;
825  ofp->fps.framerate_max = ost->max_frame_rate;
826  ofp->fps.framerate_supported = ost->force_fps ?
827  NULL : c->supported_framerates;
828 
829  // reduce frame rate for mpeg4 to be within the spec limits
830  if (c->id == AV_CODEC_ID_MPEG4)
831  ofp->fps.framerate_clip = 65535;
832 
833  ofp->fps.dup_warning = 1000;
834 
835  break;
836  case AVMEDIA_TYPE_AUDIO:
837  if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
838  ofp->format = ost->enc_ctx->sample_fmt;
839  } else {
840  ofp->formats = c->sample_fmts;
841  }
842  if (ost->enc_ctx->sample_rate) {
843  ofp->sample_rate = ost->enc_ctx->sample_rate;
844  } else {
845  ofp->sample_rates = c->supported_samplerates;
846  }
847  if (ost->enc_ctx->ch_layout.nb_channels) {
848  int ret = set_channel_layout(ofp, ost);
849  if (ret < 0)
850  return ret;
851  } else if (c->ch_layouts) {
852  ofp->ch_layouts = c->ch_layouts;
853  }
854  break;
855  }
856 
857  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
858  SCH_ENC(sched_idx_enc));
859  if (ret < 0)
860  return ret;
861 
862  return 0;
863 }
864 
866 {
867  InputFilterPriv *ifp;
868  InputFilter *ifilter;
869 
870  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
871  if (!ifp)
872  return NULL;
873 
874  ifilter = &ifp->ifilter;
875  ifilter->graph = fg;
876 
877  ifp->frame = av_frame_alloc();
878  if (!ifp->frame)
879  return NULL;
880 
881  ifp->index = fg->nb_inputs - 1;
882  ifp->format = -1;
885 
887  if (!ifp->frame_queue)
888  return NULL;
889 
890  return ifilter;
891 }
892 
893 void fg_free(FilterGraph **pfg)
894 {
895  FilterGraph *fg = *pfg;
896  FilterGraphPriv *fgp;
897 
898  if (!fg)
899  return;
900  fgp = fgp_from_fg(fg);
901 
902  for (int j = 0; j < fg->nb_inputs; j++) {
903  InputFilter *ifilter = fg->inputs[j];
904  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
905 
906  if (ifp->frame_queue) {
907  AVFrame *frame;
908  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
911  }
913 
914  av_frame_free(&ifp->frame);
915  av_frame_free(&ifp->opts.fallback);
916 
918  av_freep(&ifp->linklabel);
919  av_freep(&ifp->opts.name);
920  av_freep(&ifilter->name);
921  av_freep(&fg->inputs[j]);
922  }
923  av_freep(&fg->inputs);
924  for (int j = 0; j < fg->nb_outputs; j++) {
925  OutputFilter *ofilter = fg->outputs[j];
926  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
927 
929 
930  av_freep(&ofilter->linklabel);
931  av_freep(&ofilter->name);
933  av_freep(&fg->outputs[j]);
934  }
935  av_freep(&fg->outputs);
936  av_freep(&fgp->graph_desc);
937 
938  av_frame_free(&fgp->frame);
939  av_frame_free(&fgp->frame_enc);
940 
941  av_freep(pfg);
942 }
943 
944 static const char *fg_item_name(void *obj)
945 {
946  const FilterGraphPriv *fgp = obj;
947 
948  return fgp->log_name;
949 }
950 
// AVClass attached to FilterGraph objects; makes av_log(fg, ...) tag
// messages with the graph's log_name via fg_item_name().
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
957 
958 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
959 {
960  FilterGraphPriv *fgp;
961  FilterGraph *fg;
962 
964  AVFilterGraph *graph;
965  int ret = 0;
966 
967  fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
968  if (!fgp)
969  return AVERROR(ENOMEM);
970  fg = &fgp->fg;
971 
972  if (pfg)
973  *pfg = fg;
974 
975  fg->class = &fg_class;
976  fg->index = nb_filtergraphs - 1;
977  fgp->graph_desc = graph_desc;
979  fgp->sch = sch;
980 
981  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
982 
983  fgp->frame = av_frame_alloc();
984  fgp->frame_enc = av_frame_alloc();
985  if (!fgp->frame || !fgp->frame_enc)
986  return AVERROR(ENOMEM);
987 
988  /* this graph is only used for determining the kinds of inputs
989  * and outputs we have, and is discarded on exit from this function */
990  graph = avfilter_graph_alloc();
991  if (!graph)
992  return AVERROR(ENOMEM);;
993  graph->nb_threads = 1;
994 
995  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
996  if (ret < 0)
997  goto fail;
998 
999  for (unsigned i = 0; i < graph->nb_filters; i++) {
1000  const AVFilter *f = graph->filters[i]->filter;
1001  if (!avfilter_filter_pad_count(f, 0) &&
1002  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) {
1003  fgp->have_sources = 1;
1004  break;
1005  }
1006  }
1007 
1008  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1009  InputFilter *const ifilter = ifilter_alloc(fg);
1010  InputFilterPriv *ifp;
1011 
1012  if (!ifilter) {
1013  ret = AVERROR(ENOMEM);
1014  goto fail;
1015  }
1016 
1017  ifp = ifp_from_ifilter(ifilter);
1018  ifp->linklabel = cur->name;
1019  cur->name = NULL;
1020 
1021  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1022  cur->pad_idx);
1023 
1024  if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
1025  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1026  "currently.\n");
1027  ret = AVERROR(ENOSYS);
1028  goto fail;
1029  }
1030 
1031  ifilter->name = describe_filter_link(fg, cur, 1);
1032  if (!ifilter->name) {
1033  ret = AVERROR(ENOMEM);
1034  goto fail;
1035  }
1036  }
1037 
1038  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1039  OutputFilter *const ofilter = ofilter_alloc(fg);
1040 
1041  if (!ofilter) {
1042  ret = AVERROR(ENOMEM);
1043  goto fail;
1044  }
1045 
1046  ofilter->linklabel = cur->name;
1047  cur->name = NULL;
1048 
1049  ofilter->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1050  cur->pad_idx);
1051  ofilter->name = describe_filter_link(fg, cur, 0);
1052  if (!ofilter->name) {
1053  ret = AVERROR(ENOMEM);
1054  goto fail;
1055  }
1056  }
1057 
1058  if (!fg->nb_outputs) {
1059  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1060  ret = AVERROR(ENOSYS);
1061  goto fail;
1062  }
1063 
1064  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1065  filter_thread, fgp);
1066  if (ret < 0)
1067  goto fail;
1068  fgp->sch_idx = ret;
1069 
1070 fail:
1073  avfilter_graph_free(&graph);
1074 
1075  if (ret < 0)
1076  return ret;
1077 
1078  return 0;
1079 }
1080 
1082  char *graph_desc,
1083  Scheduler *sch, unsigned sched_idx_enc)
1084 {
1085  FilterGraph *fg;
1086  FilterGraphPriv *fgp;
1087  int ret;
1088 
1089  ret = fg_create(&fg, graph_desc, sch);
1090  if (ret < 0)
1091  return ret;
1092  fgp = fgp_from_fg(fg);
1093 
1094  fgp->is_simple = 1;
1095 
1096  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
1097  av_get_media_type_string(ost->type)[0],
1098  ost->file->index, ost->index);
1099 
1100  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1101  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1102  "to have exactly 1 input and 1 output. "
1103  "However, it had %d input(s) and %d output(s). Please adjust, "
1104  "or use a complex filtergraph (-filter_complex) instead.\n",
1105  graph_desc, fg->nb_inputs, fg->nb_outputs);
1106  return AVERROR(EINVAL);
1107  }
1108 
1109  ost->filter = fg->outputs[0];
1110 
1111  ret = ifilter_bind_ist(fg->inputs[0], ist);
1112  if (ret < 0)
1113  return ret;
1114 
1115  ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc);
1116  if (ret < 0)
1117  return ret;
1118 
1119  return 0;
1120 }
1121 
1123 {
1124  FilterGraphPriv *fgp = fgp_from_fg(fg);
1125  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1126  InputStream *ist = NULL;
1127  enum AVMediaType type = ifp->type;
1128  int i, ret;
1129 
1130  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1131  // bind to a standalone decoder
1132  int dec_idx;
1133 
1134  dec_idx = strtol(ifp->linklabel + 4, NULL, 0);
1135  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1136  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1137  dec_idx, fgp->graph_desc);
1138  return AVERROR(EINVAL);
1139  }
1140 
1141  ret = ifilter_bind_dec(ifp, decoders[dec_idx]);
1142  if (ret < 0)
1143  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1144  ifilter->name);
1145  return ret;
1146  } else if (ifp->linklabel) {
1147  // bind to an explicitly specified demuxer stream
1148  AVFormatContext *s;
1149  AVStream *st = NULL;
1150  char *p;
1151  int file_idx = strtol(ifp->linklabel, &p, 0);
1152 
1153  if (file_idx < 0 || file_idx >= nb_input_files) {
1154  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1155  file_idx, fgp->graph_desc);
1156  return AVERROR(EINVAL);
1157  }
1158  s = input_files[file_idx]->ctx;
1159 
1160  for (i = 0; i < s->nb_streams; i++) {
1161  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1162  if (stream_type != type &&
1163  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1164  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1165  continue;
1166  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
1167  st = s->streams[i];
1168  break;
1169  }
1170  }
1171  if (!st) {
1172  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1173  "matches no streams.\n", p, fgp->graph_desc);
1174  return AVERROR(EINVAL);
1175  }
1176  ist = input_files[file_idx]->streams[st->index];
1177 
1178  av_log(fg, AV_LOG_VERBOSE,
1179  "Binding input with label '%s' to input stream %d:%d\n",
1180  ifp->linklabel, ist->file->index, ist->index);
1181  } else {
1182  ist = ist_find_unused(type);
1183  if (!ist) {
1184  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1185  "unlabeled input pad %s\n", ifilter->name);
1186  return AVERROR(EINVAL);
1187  }
1188 
1189  av_log(fg, AV_LOG_VERBOSE,
1190  "Binding unlabeled input %d to input stream %d:%d\n",
1191  ifp->index, ist->file->index, ist->index);
1192  }
1193  av_assert0(ist);
1194 
1195  ret = ifilter_bind_ist(ifilter, ist);
1196  if (ret < 0) {
1197  av_log(fg, AV_LOG_ERROR,
1198  "Error binding an input stream to complex filtergraph input %s.\n",
1199  ifilter->name);
1200  return ret;
1201  }
1202 
1203  return 0;
1204 }
1205 
1207 {
1208  // bind filtergraph inputs to input streams
1209  for (int i = 0; i < fg->nb_inputs; i++) {
1211  int ret;
1212 
1213  if (ifp->bound)
1214  continue;
1215 
1216  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1217  if (ret < 0)
1218  return ret;
1219  }
1220 
1221  for (int i = 0; i < fg->nb_outputs; i++) {
1222  OutputFilter *output = fg->outputs[i];
1223  if (!output->ost) {
1225  "Filter %s has an unconnected output\n", output->name);
1226  return AVERROR(EINVAL);
1227  }
1228  }
1229  return 0;
1230 }
1231 
// Append a trim (video) or atrim (audio) filter after *last_filter so the
// output honours the requested start_time / duration limits; on success
// *last_filter/*pad_idx are advanced to the new filter. Returns 0 or a
// negative AVERROR code.
// NOTE(review): doxygen extraction dropped several lines here (the `ctx`
// declaration, the AV_OPT_SEARCH_CHILDREN arguments and the
// avfilter_init_str() call around orig. lines 1237/1259/1263/1270).
1232 static int insert_trim(int64_t start_time, int64_t duration,
1233  AVFilterContext **last_filter, int *pad_idx,
1234  const char *filter_name)
1235 {
1236  AVFilterGraph *graph = (*last_filter)->graph;
1238  const AVFilter *trim;
1239  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1240  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1241  int ret = 0;
1242 
  // nothing to limit: skip inserting the filter entirely
1243  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1244  return 0;
1245 
1246  trim = avfilter_get_by_name(name);
1247  if (!trim) {
1248  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1249  "recording time.\n", name);
1250  return AVERROR_FILTER_NOT_FOUND;
1251  }
1252 
1253  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1254  if (!ctx)
1255  return AVERROR(ENOMEM);
1256 
  // "durationi"/"starti" are the integer (AV_TIME_BASE) variants of the options
1257  if (duration != INT64_MAX) {
1258  ret = av_opt_set_int(ctx, "durationi", duration,
1260  }
1261  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1262  ret = av_opt_set_int(ctx, "starti", start_time,
1264  }
1265  if (ret < 0) {
1266  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1267  return ret;
1268  }
1269 
1271  if (ret < 0)
1272  return ret;
1273 
1274  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1275  if (ret < 0)
1276  return ret;
1277 
1278  *last_filter = ctx;
1279  *pad_idx = 0;
1280  return 0;
1281 }
1282 
// Create a filter by name with the given args, link it after *last_filter,
// and advance *last_filter/*pad_idx to the newly inserted filter.
// NOTE(review): the `ctx` declaration and the avfilter_graph_create_filter()
// call line (orig. 1287/1290) are missing from this extraction.
1283 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1284  const char *filter_name, const char *args)
1285 {
1286  AVFilterGraph *graph = (*last_filter)->graph;
1288  int ret;
1289 
1291  avfilter_get_by_name(filter_name),
1292  filter_name, args, NULL, graph);
1293  if (ret < 0)
1294  return ret;
1295 
1296  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1297  if (ret < 0)
1298  return ret;
1299 
1300  *last_filter = ctx;
1301  *pad_idx = 0;
1302  return 0;
1303 }
1304 
// Configure a video output of the filtergraph: create the buffersink, then
// optionally insert a scaler (when a fixed output size is requested and
// autoscale is on), a pixel-format constraining "format" filter, and a trim
// filter, before linking the chain into the sink.
// NOTE(review): the signature's first line (orig. 1305) and several call
// lines are missing from this doxygen extraction.
1306  OutputFilter *ofilter, AVFilterInOut *out)
1307 {
1308  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1309  OutputStream *ost = ofilter->ost;
1310  OutputFile *of = ost->file;
1311  AVFilterContext *last_filter = out->filter_ctx;
1312  AVBPrint bprint;
1313  int pad_idx = out->pad_idx;
1314  int ret;
1315  const char *pix_fmts;
1316  char name[255];
1317 
1318  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1320  avfilter_get_by_name("buffersink"),
1321  name, NULL, NULL, graph);
1322 
1323  if (ret < 0)
1324  return ret;
1325 
  // fixed output dimensions requested: insert a scaler with the user's sws options
1326  if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
1327  char args[255];
1329  const AVDictionaryEntry *e = NULL;
1330 
1331  snprintf(args, sizeof(args), "%d:%d",
1332  ofp->width, ofp->height);
1333 
1334  while ((e = av_dict_iterate(ost->sws_dict, e))) {
1335  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1336  }
1337 
1338  snprintf(name, sizeof(name), "scaler_out_%d_%d",
1339  ost->file->index, ost->index);
1341  name, args, NULL, graph)) < 0)
1342  return ret;
1343  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1344  return ret;
1345 
1346  last_filter = filter;
1347  pad_idx = 0;
1348  }
1349 
1351  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1352  if (ret < 0)
1353  return ret;
1354 
  // constrain the pixel format(s) the encoder can accept
1355  if (pix_fmts) {
1357 
1359  avfilter_get_by_name("format"),
1360  "format", pix_fmts, NULL, graph);
1361  av_bprint_finalize(&bprint, NULL);
1362  if (ret < 0)
1363  return ret;
1364  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1365  return ret;
1366 
1367  last_filter = filter;
1368  pad_idx = 0;
1369  }
1370 
1371  snprintf(name, sizeof(name), "trim_out_%d_%d",
1372  ost->file->index, ost->index);
1374  &last_filter, &pad_idx, name);
1375  if (ret < 0)
1376  return ret;
1377 
1378 
1379  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1380  return ret;
1381 
1382  return 0;
1383 }
1384 
// Configure an audio output of the filtergraph: create the abuffersink,
// optionally insert an aformat filter (sample format/rate/channel-layout
// constraints), an apad filter (for -apad with -shortest), and a trim filter,
// then link the chain into the sink. Returns 0 or a negative AVERROR code.
// NOTE(review): the signature's first line (orig. 1385) and a few call lines
// are missing from this doxygen extraction.
1386  OutputFilter *ofilter, AVFilterInOut *out)
1387 {
1388  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1389  OutputStream *ost = ofilter->ost;
1390  OutputFile *of = ost->file;
1391  AVFilterContext *last_filter = out->filter_ctx;
1392  int pad_idx = out->pad_idx;
1393  AVBPrint args;
1394  char name[255];
1395  int ret;
1396 
1397  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1399  avfilter_get_by_name("abuffersink"),
1400  name, NULL, NULL, graph);
1401  if (ret < 0)
1402  return ret;
1403  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1404  return ret;
1405 
// Helper macro: create filter_name with arg, link it after last_filter and
// make it the new chain tail; jumps to fail on error.
1406 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1407  AVFilterContext *filt_ctx; \
1408  \
1409  av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1410  "similarly to -af " filter_name "=%s.\n", arg); \
1411  \
1412  ret = avfilter_graph_create_filter(&filt_ctx, \
1413  avfilter_get_by_name(filter_name), \
1414  filter_name, arg, NULL, graph); \
1415  if (ret < 0) \
1416  goto fail; \
1417  \
1418  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1419  if (ret < 0) \
1420  goto fail; \
1421  \
1422  last_filter = filt_ctx; \
1423  pad_idx = 0; \
1424 } while (0)
1426 
1427  choose_sample_fmts(ofp, &args);
1428  choose_sample_rates(ofp, &args);
1429  choose_channel_layouts(ofp, &args);
1430  if (!av_bprint_is_complete(&args)) {
1431  ret = AVERROR(ENOMEM);
1432  goto fail;
1433  }
1434  if (args.len) {
1436 
1437  snprintf(name, sizeof(name), "format_out_%d_%d",
1438  ost->file->index, ost->index);
1440  avfilter_get_by_name("aformat"),
1441  name, args.str, NULL, graph);
1442  if (ret < 0)
1443  goto fail;
1444 
1445  ret = avfilter_link(last_filter, pad_idx, format, 0);
1446  if (ret < 0)
1447  goto fail;
1448 
1449  last_filter = format;
1450  pad_idx = 0;
1451  }
1452 
  // apad only makes sense with -shortest; presumably the loop checks for a
  // video stream in the same file (condition line missing from extraction)
1453  if (ost->apad && of->shortest) {
1454  int i;
1455 
1456  for (i = 0; i < of->nb_streams; i++)
1458  break;
1459 
1460  if (i < of->nb_streams) {
1461  AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
1462  }
1463  }
1464 
1465  snprintf(name, sizeof(name), "trim for output stream %d:%d",
1466  ost->file->index, ost->index);
1468  &last_filter, &pad_idx, name);
1469  if (ret < 0)
1470  goto fail;
1471 
1472  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1473  goto fail;
1474 fail:
1475  av_bprint_finalize(&args, NULL);
1476 
1477  return ret;
1478 }
1479 
// Dispatch output configuration to the video or audio variant based on the
// media type of the connected output pad; fail if the output is unconnected.
// NOTE(review): the signature's first line (orig. 1480) is missing here.
1481  OutputFilter *ofilter, AVFilterInOut *out)
1482 {
1483  if (!ofilter->ost) {
1484  av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
1485  return AVERROR(EINVAL);
1486  }
1487 
1488  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1489  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
1490  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
1491  default: av_assert0(0); return 0;
1492  }
1493 }
1494 
// Reset sub2video timing state so that rendering of subtitles-over-video
// restarts cleanly on the next heartbeat.
// NOTE(review): the signature line (orig. 1495) is missing from this chunk.
1496 {
1497  ifp->sub2video.last_pts = INT64_MIN;
1498  ifp->sub2video.end_pts = INT64_MIN;
1499 
1500  /* sub2video structure has been (re-)initialized.
1501  Mark it as such so that the system will be
1502  initialized with the first received heartbeat. */
1503  ifp->sub2video.initialize = 1;
1504 }
1505 
// Configure a video input of the filtergraph: create a buffer source with the
// stream's video parameters, optionally apply display-matrix auto-rotation
// (transpose/hflip/vflip/rotate), insert an input trim, and link into the
// parsed graph input. Returns 0 or a negative AVERROR code.
// NOTE(review): several lines (the `par` allocation, bprint init, desc
// lookup, insert_trim call) are missing from this doxygen extraction.
1507  InputFilter *ifilter, AVFilterInOut *in)
1508 {
1509  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1510 
1511  AVFilterContext *last_filter;
1512  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1513  const AVPixFmtDescriptor *desc;
1514  AVRational fr = ifp->opts.framerate;
1515  AVRational sar;
1516  AVBPrint args;
1517  char name[255];
1518  int ret, pad_idx = 0;
1520  if (!par)
1521  return AVERROR(ENOMEM);
1522 
1523  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1524  sub2video_prepare(ifp);
1525 
1526  sar = ifp->sample_aspect_ratio;
1527  if(!sar.den)
1528  sar = (AVRational){0,1};
1530  av_bprintf(&args,
1531  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1532  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1533  ifp->width, ifp->height, ifp->format,
1534  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1535  ifp->color_space, ifp->color_range);
1536  if (fr.num && fr.den)
1537  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1538  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1539  ifp->opts.name);
1540 
1541 
1542  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1543  args.str, NULL, graph)) < 0)
1544  goto fail;
1545  par->hw_frames_ctx = ifp->hw_frames_ctx;
1546  ret = av_buffersrc_parameters_set(ifp->filter, par);
1547  if (ret < 0)
1548  goto fail;
1549  av_freep(&par);
1550  last_filter = ifp->filter;
1551 
1553  av_assert0(desc);
1554 
1555  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1556  ifp->displaymatrix_applied = 0;
1557  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1558  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1559  int32_t *displaymatrix = ifp->displaymatrix;
1560  double theta;
1561 
1562  theta = get_rotation(displaymatrix);
1563 
  // pick the cheapest filter chain for the (near-)right-angle rotations
1564  if (fabs(theta - 90) < 1.0) {
1565  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1566  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1567  } else if (fabs(theta - 180) < 1.0) {
1568  if (displaymatrix[0] < 0) {
1569  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1570  if (ret < 0)
1571  return ret;
1572  }
1573  if (displaymatrix[4] < 0) {
1574  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1575  }
1576  } else if (fabs(theta - 270) < 1.0) {
1577  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1578  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1579  } else if (fabs(theta) > 1.0) {
1580  char rotate_buf[64];
1581  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1582  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1583  } else if (fabs(theta) < 1.0) {
1584  if (displaymatrix && displaymatrix[4] < 0) {
1585  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1586  }
1587  }
1588  if (ret < 0)
1589  return ret;
1590 
1591  ifp->displaymatrix_applied = 1;
1592  }
1593 
1594  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1596  &last_filter, &pad_idx, name);
1597  if (ret < 0)
1598  return ret;
1599 
1600  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1601  return ret;
1602  return 0;
1603 fail:
1604  av_freep(&par);
1605 
1606  return ret;
1607 }
1608 
// Configure an audio input of the filtergraph: create an abuffer source from
// the stream's audio parameters, insert an input trim, and link into the
// parsed graph input. Returns 0 or a negative AVERROR code.
// NOTE(review): the signature's first line and a few lines (bprint init,
// channel-layout describe, insert_trim call) are missing from this chunk.
1610  InputFilter *ifilter, AVFilterInOut *in)
1611 {
1612  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1613  AVFilterContext *last_filter;
1614  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1615  AVBPrint args;
1616  char name[255];
1617  int ret, pad_idx = 0;
1618 
1620  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1621  ifp->time_base.num, ifp->time_base.den,
1622  ifp->sample_rate,
1624  if (av_channel_layout_check(&ifp->ch_layout) &&
1626  av_bprintf(&args, ":channel_layout=");
1628  } else
1629  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1630  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1631 
1632  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1633  name, args.str, NULL,
1634  graph)) < 0)
1635  return ret;
1636  last_filter = ifp->filter;
1637 
1638  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1640  &last_filter, &pad_idx, name);
1641  if (ret < 0)
1642  return ret;
1643 
1644  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1645  return ret;
1646 
1647  return 0;
1648 }
1649 
// Dispatch input configuration to the video or audio variant based on the
// input filter's media type.
// NOTE(review): the signature's first line (orig. 1650) is missing here.
1651  InputFilter *ifilter, AVFilterInOut *in)
1652 {
1653  switch (ifp_from_ifilter(ifilter)->type) {
1654  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1655  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1656  default: av_assert0(0); return 0;
1657  }
1658 }
1659 
// Free the configured AVFilterGraph and clear the dangling per-input/output
// filter-context pointers that referenced it.
// NOTE(review): the signature line and the output-clearing statement
// (orig. 1660/1663) are missing from this extraction.
1661 {
1662  for (int i = 0; i < fg->nb_outputs; i++)
1664  for (int i = 0; i < fg->nb_inputs; i++)
1665  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1666  avfilter_graph_free(&fgt->graph);
1667 }
1668 
// Return non-zero when f is a source buffer filter ("buffer"/"abuffer"
// with no inputs). Signature line (orig. 1669) missing from extraction.
1670 {
1671  return f->nb_inputs == 0 &&
1672  (!strcmp(f->filter->name, "buffer") ||
1673  !strcmp(f->filter->name, "abuffer"));
1674 }
1675 
// Return 1 if every filter in the graph is metadata-only (does not modify
// frame data), a sink, or a buffer source; 0 otherwise.
// NOTE(review): the buffersrc check line (orig. 1687) is missing here.
1676 static int graph_is_meta(AVFilterGraph *graph)
1677 {
1678  for (unsigned i = 0; i < graph->nb_filters; i++) {
1679  const AVFilterContext *f = graph->filters[i];
1680 
1681  /* in addition to filters flagged as meta, also
1682  * disregard sinks and buffersources (but not other sources,
1683  * since they introduce data we are not aware of)
1684  */
1685  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1686  f->nb_outputs == 0 ||
1688  return 0;
1689  }
1690  return 1;
1691 }
1692 
1693 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1694 
// (Re)configure the whole filtergraph: allocate a fresh AVFilterGraph, apply
// per-output thread/sws/swr options for simple graphs, parse the graph
// description, configure all inputs and outputs, record the negotiated output
// formats, then replay any queued input frames and propagate pending EOFs.
// NOTE(review): the signature line and numerous statements are missing from
// this doxygen extraction (e.g. orig. 1740, 1750-1754, 1759, 1766,
// 1788, 1794-1797, 1804, 1810, 1820-1822, 1831).
1696 {
1697  FilterGraphPriv *fgp = fgp_from_fg(fg);
1698  AVBufferRef *hw_device;
1699  AVFilterInOut *inputs, *outputs, *cur;
1700  int ret, i, simple = filtergraph_is_simple(fg);
1701  int have_input_eof = 0;
1702  const char *graph_desc = fgp->graph_desc;
1703 
1704  cleanup_filtergraph(fg, fgt);
1705  fgt->graph = avfilter_graph_alloc();
1706  if (!fgt->graph)
1707  return AVERROR(ENOMEM);
1708 
  // simple (1-in/1-out) graphs inherit thread count and sws/swr options
  // from the single output stream
1709  if (simple) {
1710  OutputStream *ost = fg->outputs[0]->ost;
1711 
1712  if (filter_nbthreads) {
1713  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1714  if (ret < 0)
1715  goto fail;
1716  } else {
1717  const AVDictionaryEntry *e = NULL;
1718  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1719  if (e)
1720  av_opt_set(fgt->graph, "threads", e->value, 0);
1721  }
1722 
1723  if (av_dict_count(ost->sws_dict)) {
1724  ret = av_dict_get_string(ost->sws_dict,
1725  &fgt->graph->scale_sws_opts,
1726  '=', ':');
1727  if (ret < 0)
1728  goto fail;
1729  }
1730 
1731  if (av_dict_count(ost->swr_opts)) {
1732  char *args;
1733  ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
1734  if (ret < 0)
1735  goto fail;
1736  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1737  av_free(args);
1738  }
1739  } else {
1741  }
1742 
1743  hw_device = hw_device_for_filter();
1744 
1745  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1746  goto fail;
1747 
1748  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1749  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1752  goto fail;
1753  }
1755 
1756  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1757  ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
1758  if (ret < 0) {
1760  goto fail;
1761  }
1762  }
1764 
1765  if (fgp->disable_conversions)
1767  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1768  goto fail;
1769 
1770  fgp->is_meta = graph_is_meta(fgt->graph);
1771 
1772  /* limit the lists of allowed formats to the ones selected, to
1773  * make sure they stay the same if the filtergraph is reconfigured later */
1774  for (int i = 0; i < fg->nb_outputs; i++) {
1775  OutputFilter *ofilter = fg->outputs[i];
1776  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1777  AVFilterContext *sink = ofp->filter;
1778 
1779  ofp->format = av_buffersink_get_format(sink);
1780 
1781  ofp->width = av_buffersink_get_w(sink);
1782  ofp->height = av_buffersink_get_h(sink);
1783 
1784  // If the timing parameters are not locked yet, get the tentative values
1785  // here but don't lock them. They will only be used if no output frames
1786  // are ever produced.
1787  if (!ofp->tb_out_locked) {
1789  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1790  fr.num > 0 && fr.den > 0)
1791  ofp->fps.framerate = fr;
1792  ofp->tb_out = av_buffersink_get_time_base(sink);
1793  }
1795 
1798  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1799  if (ret < 0)
1800  goto fail;
1801  }
1802 
  // replay frames that were queued while the graph was unconfigured
1803  for (int i = 0; i < fg->nb_inputs; i++) {
1805  AVFrame *tmp;
1806  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1807  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1808  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1809  } else {
1811  }
1812  av_frame_free(&tmp);
1813  if (ret < 0)
1814  goto fail;
1815  }
1816  }
1817 
1818  /* send the EOFs for the finished inputs */
1819  for (int i = 0; i < fg->nb_inputs; i++) {
1821  if (fgt->eof_in[i]) {
1823  if (ret < 0)
1824  goto fail;
1825  have_input_eof = 1;
1826  }
1827  }
1828 
1829  if (have_input_eof) {
1830  // make sure the EOF propagates to the end of the graph
1832  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1833  goto fail;
1834  }
1835 
1836  return 0;
1837 fail:
1838  cleanup_filtergraph(fg, fgt);
1839  return ret;
1840 }
1841 
// Capture an input filter's stream parameters (timebase, format, dimensions,
// colorimetry, audio layout, display matrix, hw frames context) from a frame,
// so the buffersrc can later be configured to match.
// NOTE(review): the signature line (orig. 1842) and the side-data lookup
// line (orig. 1869) are missing from this extraction.
1843 {
1844  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1845  AVFrameSideData *sd;
1846  int ret;
1847 
1848  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
1849  if (ret < 0)
1850  return ret;
1851 
  // audio uses 1/sample_rate; CFR video uses the inverse framerate;
  // everything else keeps the frame's own timebase
1852  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
1853  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
1854  frame->time_base;
1855 
1856  ifp->format = frame->format;
1857 
1858  ifp->width = frame->width;
1859  ifp->height = frame->height;
1860  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
1861  ifp->color_space = frame->colorspace;
1862  ifp->color_range = frame->color_range;
1863 
1864  ifp->sample_rate = frame->sample_rate;
1865  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
1866  if (ret < 0)
1867  return ret;
1868 
1870  if (sd)
1871  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1872  ifp->displaymatrix_present = !!sd;
1873 
1874  return 0;
1875 }
1876 
// Return non-zero for a "simple" filtergraph (implicitly created for a
// single stream, as opposed to -filter_complex). Signature line missing.
1878 {
1879  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1880  return fgp->is_simple;
1881 }
1882 
1883 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
1884  double time, const char *target,
1885  const char *command, const char *arg, int all_filters)
1886 {
1887  int ret;
1888 
1889  if (!graph)
1890  return;
1891 
1892  if (time < 0) {
1893  char response[4096];
1894  ret = avfilter_graph_send_command(graph, target, command, arg,
1895  response, sizeof(response),
1896  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1897  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1898  fg->index, ret, response);
1899  } else if (!all_filters) {
1900  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1901  } else {
1902  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
1903  if (ret < 0)
1904  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1905  }
1906 }
1907 
1908 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
1909 {
1910  int nb_requests, nb_requests_max = -1;
1911  int best_input = -1;
1912 
1913  for (int i = 0; i < fg->nb_inputs; i++) {
1914  InputFilter *ifilter = fg->inputs[i];
1915  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1916 
1917  if (fgt->eof_in[i])
1918  continue;
1919 
1920  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
1921  if (nb_requests > nb_requests_max) {
1922  nb_requests_max = nb_requests;
1923  best_input = i;
1924  }
1925  }
1926 
1927  av_assert0(best_input >= 0);
1928 
1929  return best_input;
1930 }
1931 
// Decide and lock the output timebase (and target framerate for CFR video)
// for one filtergraph output, honouring -enc_time_base, -r/-fpsmax, and any
// encoder-supported framerate list.
// NOTE(review): the signature line (orig. 1932) and the buffersink framerate
// query (orig. 1964) are missing from this extraction.
1933 {
1934  OutputFilter *ofilter = &ofp->ofilter;
1935  FPSConvContext *fps = &ofp->fps;
1936  AVRational tb = (AVRational){ 0, 0 };
1937  AVRational fr;
1938  const FrameData *fd;
1939 
1940  fd = frame_data_c(frame);
1941 
1942  // apply -enc_time_base
1943  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
1944  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
1945  av_log(ofilter->ost, AV_LOG_ERROR,
1946  "Demuxing timebase not available - cannot use it for encoding\n");
1947  return AVERROR(EINVAL);
1948  }
1949 
1950  switch (ofp->enc_timebase.num) {
1951  case 0: break;
1952  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
1953  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
1954  default: tb = ofp->enc_timebase; break;
1955  }
1956 
  // audio: default to 1/sample_rate and skip framerate handling entirely
1957  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
1958  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
1959  goto finish;
1960  }
1961 
1962  fr = fps->framerate;
1963  if (!fr.num) {
1965  if (fr_sink.num > 0 && fr_sink.den > 0)
1966  fr = fr_sink;
1967  }
1968 
1969  if (ofilter->ost->is_cfr) {
1970  if (!fr.num && !fps->framerate_max.num) {
1971  fr = (AVRational){25, 1};
1972  av_log(ofilter->ost, AV_LOG_WARNING,
1973  "No information "
1974  "about the input framerate is available. Falling "
1975  "back to a default value of 25fps. Use the -r option "
1976  "if you want a different framerate.\n");
1977  }
1978 
1979  if (fps->framerate_max.num &&
1980  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
1981  !fr.den))
1982  fr = fps->framerate_max;
1983  }
1984 
  // snap to the nearest encoder-supported rate, then clip the fraction
1985  if (fr.num > 0) {
1986  if (fps->framerate_supported) {
1987  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
1988  fr = fps->framerate_supported[idx];
1989  }
1990  if (fps->framerate_clip) {
1991  av_reduce(&fr.num, &fr.den,
1992  fr.num, fr.den, fps->framerate_clip);
1993  }
1994  }
1995 
1996  if (!(tb.num > 0 && tb.den > 0))
1997  tb = av_inv_q(fr);
1998  if (!(tb.num > 0 && tb.den > 0))
1999  tb = frame->time_base;
2000 
2001  fps->framerate = fr;
2002 finish:
2003  ofp->tb_out = tb;
2004  ofp->tb_out_locked = 1;
2005 
2006  return 0;
2007 }
2008 
// Rescale a frame's pts from the filter timebase to the encoder timebase
// tb_dst (offsetting by start_time), and return the same pts as a
// higher-precision double for the fps conversion code.
// NOTE(review): the signature's first line (orig. 2009) and the start_time
// rescale terms (orig. 2023/2032) are missing from this extraction.
2010  int64_t start_time)
2011 {
2012  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2013 
2014  AVRational tb = tb_dst;
2015  AVRational filter_tb = frame->time_base;
2016  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2017 
2018  if (frame->pts == AV_NOPTS_VALUE)
2019  goto early_exit;
2020 
  // widen the timebase so the double keeps sub-tick precision
2021  tb.den <<= extra_bits;
2022  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2024  float_pts /= 1 << extra_bits;
2025  // when float_pts is not exactly an integer,
2026  // avoid exact midpoints to reduce the chance of rounding differences, this
2027  // can be removed in case the fps code is changed to work with integers
2028  if (float_pts != llrint(float_pts))
2029  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2030 
2031  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2033  frame->time_base = tb_dst;
2034 
2035 early_exit:
2036 
2037  if (debug_ts) {
2038  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2039  frame ? av_ts2str(frame->pts) : "NULL",
2040  av_ts2timestr(frame->pts, &tb_dst),
2041  float_pts, tb_dst.num, tb_dst.den);
2042  }
2043 
2044  return float_pts;
2045 }
2046 
2047 /* Convert frame timestamps to the encoder timebase and decide how many times
2048  * should this (and possibly previous) frame be repeated in order to conform to
2049  * desired target framerate (if any).
2050  */
// NOTE(review): doxygen extraction dropped the signature's first line
// (orig. 2051) and a few lines (orig. 2089, 2149). On NULL frame (EOF) the
// duplicate count is predicted from the recent history via mid_pred().
2052  int64_t *nb_frames, int64_t *nb_frames_prev)
2053 {
2054  OutputFilter *ofilter = &ofp->ofilter;
2055  OutputStream *ost = ofilter->ost;
2056  FPSConvContext *fps = &ofp->fps;
2057  double delta0, delta, sync_ipts, duration;
2058 
2059  if (!frame) {
2060  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2061  fps->frames_prev_hist[1],
2062  fps->frames_prev_hist[2]);
2063 
2064  if (!*nb_frames && fps->last_dropped) {
2065  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2066  fps->last_dropped++;
2067  }
2068 
2069  goto finish;
2070  }
2071 
2072  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2073 
2074  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2075  /* delta0 is the "drift" between the input frame and
2076  * where it would fall in the output. */
2077  delta0 = sync_ipts - ofp->next_pts;
2078  delta = delta0 + duration;
2079 
2080  // tracks the number of times the PREVIOUS frame should be duplicated,
2081  // mostly for variable framerate (VFR)
2082  *nb_frames_prev = 0;
2083  /* by default, we output a single frame */
2084  *nb_frames = 1;
2085 
  // frame arrived slightly early: clip it to the expected position
2086  if (delta0 < 0 &&
2087  delta > 0 &&
2088  ost->vsync_method != VSYNC_PASSTHROUGH
2090  && ost->vsync_method != VSYNC_DROP
2091 #endif
2092  ) {
2093  if (delta0 < -0.6) {
2094  av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2095  } else
2096  av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2097  sync_ipts = ofp->next_pts;
2098  duration += delta0;
2099  delta0 = 0;
2100  }
2101 
2102  switch (ost->vsync_method) {
2103  case VSYNC_VSCFR:
2104  if (fps->frame_number == 0 && delta0 >= 0.5) {
2105  av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2106  delta = duration;
2107  delta0 = 0;
2108  ofp->next_pts = llrint(sync_ipts);
2109  }
  // fallthrough to CFR handling (intentional)
2110  case VSYNC_CFR:
2111  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2112  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2113  *nb_frames = 0;
2114  } else if (delta < -1.1)
2115  *nb_frames = 0;
2116  else if (delta > 1.1) {
2117  *nb_frames = llrintf(delta);
2118  if (delta0 > 1.1)
2119  *nb_frames_prev = llrintf(delta0 - 0.6);
2120  }
2121  frame->duration = 1;
2122  break;
2123  case VSYNC_VFR:
2124  if (delta <= -0.6)
2125  *nb_frames = 0;
2126  else if (delta > 0.6)
2127  ofp->next_pts = llrint(sync_ipts);
2128  frame->duration = llrint(duration);
2129  break;
2130 #if FFMPEG_OPT_VSYNC_DROP
2131  case VSYNC_DROP:
2132 #endif
2133  case VSYNC_PASSTHROUGH:
2134  ofp->next_pts = llrint(sync_ipts);
2135  frame->duration = llrint(duration);
2136  break;
2137  default:
2138  av_assert0(0);
2139  }
2140 
2141 finish:
  // shift the dup-history window and record this decision
2142  memmove(fps->frames_prev_hist + 1,
2143  fps->frames_prev_hist,
2144  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2145  fps->frames_prev_hist[0] = *nb_frames_prev;
2146 
2147  if (*nb_frames_prev == 0 && fps->last_dropped) {
2148  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2150  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2151  fps->frame_number, fps->last_frame->pts);
2152  }
2153  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2154  uint64_t nb_frames_dup;
2155  if (*nb_frames > dts_error_threshold * 30) {
2156  av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2157  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2158  *nb_frames = 0;
2159  return;
2160  }
2161  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2162  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2163  av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2164  if (nb_frames_dup > fps->dup_warning) {
2165  av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2166  fps->dup_warning *= 10;
2167  }
2168  }
2169 
2170  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2171  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2172 }
2173 
// Finalize one filtergraph output: if no frame was ever produced, send a
// dummy (parameter-only) frame so the encoder can still be initialized,
// then mark the output EOF and send the terminating NULL to the scheduler.
// NOTE(review): the signature and the fgp declaration (orig. 2174/2176)
// plus two log lines are missing from this extraction.
2175 {
2177  int ret;
2178 
2179  // we are finished and no frames were ever seen at this output,
2180  // at least initialize the encoder with a dummy frame
2181  if (!fgt->got_frame) {
2182  AVFrame *frame = fgt->frame;
2183  FrameData *fd;
2184 
2185  frame->time_base = ofp->tb_out;
2186  frame->format = ofp->format;
2187 
2188  frame->width = ofp->width;
2189  frame->height = ofp->height;
2190  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2191 
2192  frame->sample_rate = ofp->sample_rate;
2193  if (ofp->ch_layout.nb_channels) {
2194  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2195  if (ret < 0)
2196  return ret;
2197  }
2198 
2199  fd = frame_data(frame);
2200  if (!fd)
2201  return AVERROR(ENOMEM);
2202 
2203  fd->frame_rate_filter = ofp->fps.framerate;
2204 
  // the dummy frame must carry parameters only, no data buffers
2205  av_assert0(!frame->buf[0]);
2206 
2208  "No filtered frames for output stream, trying to "
2209  "initialize anyway.\n");
2210 
2211  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2212  if (ret < 0) {
2214  return ret;
2215  }
2216  }
2217 
2218  fgt->eof_out[ofp->index] = 1;
2219 
  // NULL frame signals EOF downstream; EOF from the scheduler is not an error
2220  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2221  return (ret == AVERROR_EOF) ? 0 : ret;
2222 }
2223 
// Deliver one filtered frame (or EOF when frame is NULL) on an output:
// perform framerate conversion for video (duplicating the previous or
// current frame as computed by video_sync_process()), rescale audio pts,
// and send the result(s) to the scheduler.
// NOTE(review): the signature's first line, the fgp declaration and the
// audio ts_offset term (orig. 2224/2227/2260) are missing from this chunk.
2225  AVFrame *frame)
2226 {
2228  AVFrame *frame_prev = ofp->fps.last_frame;
2229  enum AVMediaType type = ofp->ofilter.type;
2230 
2231  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2232 
2233  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2234  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2235 
2236  for (int64_t i = 0; i < nb_frames; i++) {
2237  AVFrame *frame_out;
2238  int ret;
2239 
2240  if (type == AVMEDIA_TYPE_VIDEO) {
  // first nb_frames_prev iterations re-send the previous frame
2241  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2242  frame_prev : frame;
2243  if (!frame_in)
2244  break;
2245 
2246  frame_out = fgp->frame_enc;
2247  ret = av_frame_ref(frame_out, frame_in);
2248  if (ret < 0)
2249  return ret;
2250 
2251  frame_out->pts = ofp->next_pts;
2252 
2253  if (ofp->fps.dropped_keyframe) {
2254  frame_out->flags |= AV_FRAME_FLAG_KEY;
2255  ofp->fps.dropped_keyframe = 0;
2256  }
2257  } else {
2258  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2259  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2261 
2262  frame->time_base = ofp->tb_out;
2263  frame->duration = av_rescale_q(frame->nb_samples,
2264  (AVRational){ 1, frame->sample_rate },
2265  ofp->tb_out);
2266 
2267  ofp->next_pts = frame->pts + frame->duration;
2268 
2269  frame_out = frame;
2270  }
2271 
2272  // send the frame to consumers
2273  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2274  if (ret < 0) {
2275  av_frame_unref(frame_out);
2276 
2277  if (!fgt->eof_out[ofp->index]) {
2278  fgt->eof_out[ofp->index] = 1;
2279  fgp->nb_outputs_done++;
2280  }
2281 
2282  return ret == AVERROR_EOF ? 0 : ret;
2283  }
2284 
2285  if (type == AVMEDIA_TYPE_VIDEO) {
2286  ofp->fps.frame_number++;
2287  ofp->next_pts++;
2288 
2289  if (i == nb_frames_prev && frame)
2290  frame->flags &= ~AV_FRAME_FLAG_KEY;
2291  }
2292 
2293  fgt->got_frame = 1;
2294  }
2295 
  // remember this frame so it can be duplicated on the next call
2296  if (frame && frame_prev) {
2297  av_frame_unref(frame_prev);
2298  av_frame_move_ref(frame_prev, frame);
2299  }
2300 
2301  if (!frame)
2302  return close_output(ofp, fgt);
2303 
2304  return 0;
2305 }
2306 
// Pull one frame from a buffersink and forward it via fg_output_frame().
// Returns 1 when the sink has nothing (EAGAIN) or reached EOF, 0 on a
// successfully processed frame, negative AVERROR on failure.
// NOTE(review): the signature's first line, the fgp declaration and the
// buffersink-read / frame-unref lines are missing from this extraction.
2308  AVFrame *frame)
2309 {
2311  OutputStream *ost = ofp->ofilter.ost;
2312  AVFilterContext *filter = ofp->filter;
2313  FrameData *fd;
2314  int ret;
2315 
2318  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
  // first EOF on this output: flush it with a NULL frame
2319  ret = fg_output_frame(ofp, fgt, NULL);
2320  return (ret < 0) ? ret : 1;
2321  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2322  return 1;
2323  } else if (ret < 0) {
2324  av_log(fgp, AV_LOG_WARNING,
2325  "Error in retrieving a frame from the filtergraph: %s\n",
2326  av_err2str(ret));
2327  return ret;
2328  }
2329 
2330  if (fgt->eof_out[ofp->index]) {
2332  return 0;
2333  }
2334 
2336 
2337  if (debug_ts)
2338  av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2339  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2340  frame->time_base.num, frame->time_base.den);
2341 
2342  // Choose the output timebase the first time we get a frame.
2343  if (!ofp->tb_out_locked) {
2344  ret = choose_out_timebase(ofp, frame);
2345  if (ret < 0) {
2346  av_log(ost, AV_LOG_ERROR, "Could not choose an output time base\n");
2348  return ret;
2349  }
2350  }
2351 
2352  fd = frame_data(frame);
2353  if (!fd) {
2355  return AVERROR(ENOMEM);
2356  }
2357 
2359 
2360  // only use bits_per_raw_sample passed through from the decoder
2361  // if the filtergraph did not touch the frame data
2362  if (!fgp->is_meta)
2363  fd->bits_per_raw_sample = 0;
2364 
2365  if (ost->type == AVMEDIA_TYPE_VIDEO) {
2366  if (!frame->duration) {
2368  if (fr.num > 0 && fr.den > 0)
2369  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2370  }
2371 
2372  fd->frame_rate_filter = ofp->fps.framerate;
2373  }
2374 
2375  ret = fg_output_frame(ofp, fgt, frame);
2377  if (ret < 0)
2378  return ret;
2379 
2380  return 0;
2381 }
2382 
2383 /* retrieve all frames available at filtergraph outputs
2384  * and send them to consumers */
// NOTE(review): the signature's first line (orig. 2385) and the
// avfilter_graph_request_oldest() call (orig. 2411) are missing from this
// extraction. Returns AVERROR_EOF once every output has finished.
2386  AVFrame *frame)
2387 {
2388  FilterGraphPriv *fgp = fgp_from_fg(fg);
2389  int did_step = 0;
2390 
2391  // graph not configured, just select the input to request
2392  if (!fgt->graph) {
2393  for (int i = 0; i < fg->nb_inputs; i++) {
2395  if (ifp->format < 0 && !fgt->eof_in[i]) {
2396  fgt->next_in = i;
2397  return 0;
2398  }
2399  }
2400 
2401  // This state - graph is not configured, but all inputs are either
2402  // initialized or EOF - should be unreachable because sending EOF to a
2403  // filter without even a fallback format should fail
2404  av_assert0(0);
2405  return AVERROR_BUG;
2406  }
2407 
2408  while (fgp->nb_outputs_done < fg->nb_outputs) {
2409  int ret;
2410 
2412  if (ret == AVERROR(EAGAIN)) {
  // graph is starved: decide which input to feed next
2413  fgt->next_in = choose_input(fg, fgt);
2414  break;
2415  } else if (ret < 0) {
2416  if (ret == AVERROR_EOF)
2417  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2418  else
2419  av_log(fg, AV_LOG_ERROR,
2420  "Error requesting a frame from the filtergraph: %s\n",
2421  av_err2str(ret));
2422  return ret;
2423  }
2424  fgt->next_in = fg->nb_inputs;
2425 
2426  // return after one iteration, so that scheduler can rate-control us
2427  if (did_step && fgp->have_sources)
2428  return 0;
2429 
2430  /* Reap all buffers present in the buffer sinks */
2431  for (int i = 0; i < fg->nb_outputs; i++) {
2433 
2434  ret = 0;
2435  while (!ret) {
2436  ret = fg_output_step(ofp, fgt, frame);
2437  if (ret < 0)
2438  return ret;
2439  }
2440  }
2441  did_step = 1;
2442  }
2443 
2444  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2445 }
2446 
2447 static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
2448 {
2449  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2450  int64_t pts2;
2451 
2452  /* subtitles seem to be usually muxed ahead of other streams;
2453  if not, subtracting a larger time here is necessary */
2454  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2455 
2456  /* do not send the heartbeat frame if the subtitle is already ahead */
2457  if (pts2 <= ifp->sub2video.last_pts)
2458  return;
2459 
2460  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2461  /* if we have hit the end of the current displayed subpicture,
2462  or if we need to initialize the system, update the
2463  overlayed subpicture and its start/end times */
2464  sub2video_update(ifp, pts2 + 1, NULL);
2465  else
2466  sub2video_push_ref(ifp, pts2);
2467 }
2468 
// Feed one subtitle-derived event into the sub2video machinery.
// frame may be: a real subtitle frame (buf[0] set, carrying an AVSubtitle),
// a heartbeat frame (buf[0] == NULL), or NULL meaning EOF.
// When 'buffer' is nonzero the frame is queued instead of processed
// (used while the graph is not configured yet). Returns 0 or an AVERROR.
2469 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2470 {
2471  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2472  int ret;
2473 
2474  if (buffer) {
2475  AVFrame *tmp;
2476 
2477  if (!frame)
2478  return 0;
2479 
2480  tmp = av_frame_alloc();
2481  if (!tmp)
2482  return AVERROR(ENOMEM);
2483 
// NOTE(review): line 2484 missing here — presumably moves frame's
// contents into tmp (e.g. av_frame_move_ref(tmp, frame)); confirm upstream.
2485 
2486  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2487  if (ret < 0) {
2488  av_frame_free(&tmp);
2489  return ret;
2490  }
2491 
2492  return 0;
2493  }
2494 
2495  // heartbeat frame
2496  if (frame && !frame->buf[0]) {
2497  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2498  return 0;
2499  }
2500 
// EOF: close out the current subpicture, then signal EOF to buffersrc
2501  if (!frame) {
2502  if (ifp->sub2video.end_pts < INT64_MAX)
2503  sub2video_update(ifp, INT64_MAX, NULL);
2504 
2505  return av_buffersrc_add_frame(ifp->filter, NULL);
2506  }
2507 
// keep last non-zero dimensions for rendering the subpicture canvas
2508  ifp->width = frame->width ? frame->width : ifp->width;
2509  ifp->height = frame->height ? frame->height : ifp->height;
2510 
// real subtitle: buf[0]->data carries the decoded AVSubtitle
2511  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2512 
2513  return 0;
2514 }
2515 
// Send EOF to one filtergraph input; pts/tb give the EOF timestamp.
// If the graph was never configured, fall back to the decoder-provided
// parameters so the graph can still be built. Returns 0 or an AVERROR.
2516 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2517  int64_t pts, AVRational tb)
2518 {
2519  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2520  int ret;
2521 
// idempotent: EOF for this input may arrive more than once
2522  if (fgt->eof_in[ifp->index])
2523  return 0;
2524 
2525  fgt->eof_in[ifp->index] = 1;
2526 
2527  if (ifp->filter) {
2528  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
// NOTE(review): lines 2529/2531 missing — the rounding flags for
// av_rescale_q_rnd() and the call that sets ret (presumably closing the
// buffersrc with this pts, e.g. av_buffersrc_close()); confirm upstream.
2530 
2532  if (ret < 0)
2533  return ret;
2534  } else {
2535  if (ifp->format < 0) {
2536  // the filtergraph was never configured, use the fallback parameters
2537  ifp->format = ifp->opts.fallback->format;
2538  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2539  ifp->width = ifp->opts.fallback->width;
2540  ifp->height = ifp->opts.fallback->height;
// NOTE(review): line 2541 missing — likely copies another fallback field
// (e.g. sample_aspect_ratio); confirm upstream.
2542  ifp->color_space = ifp->opts.fallback->colorspace;
2543  ifp->color_range = ifp->opts.fallback->color_range;
2544  ifp->time_base = ifp->opts.fallback->time_base;
2545 
// NOTE(review): line 2546 missing — start of the call that sets ret from
// the fallback channel layout (likely av_channel_layout_copy); confirm.
2547  &ifp->opts.fallback->ch_layout);
2548  if (ret < 0)
2549  return ret;
2550 
2551  if (ifilter_has_all_input_formats(ifilter->graph)) {
2552  ret = configure_filtergraph(ifilter->graph, fgt);
2553  if (ret < 0) {
2554  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2555  return ret;
2556  }
2557  }
2558  }
2559 
2560  if (ifp->format < 0) {
// NOTE(review): line 2561 missing — the av_log() call whose arguments
// continue below; confirm upstream.
2562  "Cannot determine format of input %s after EOF\n",
2563  ifp->opts.name);
2564  return AVERROR_INVALIDDATA;
2565  }
2566  }
2567 
2568  return 0;
2569 }
2570 
// Bit flags recording which aspects of an input's parameters changed and
// therefore require a filtergraph reconfiguration (consumed in send_frame()).
// NOTE(review): the enum's opening line (source line 2571) is missing from
// this extraction.
2572  VIDEO_CHANGED = (1 << 0),
2573  AUDIO_CHANGED = (1 << 1),
2574  MATRIX_CHANGED = (1 << 2),
2575  HWACCEL_CHANGED = (1 << 3)
2576 };
2577 
/* Substitute the literal "unknown" for a NULL string, so callers can pass
 * the result of name-lookup helpers straight into log formatting. */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2582 
// Push one decoded frame into a filtergraph input, reconfiguring the graph
// first if the frame's parameters differ from what the graph was built with.
// NOTE(review): the first signature line (source line 2583) is missing from
// this extraction; it presumably reads
// "static int send_frame(FilterGraph *fg, FilterGraphThread *fgt," — confirm.
2584  InputFilter *ifilter, AVFrame *frame)
2585 {
2586  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2587  FrameData *fd;
2588  AVFrameSideData *sd;
2589  int need_reinit = 0, ret;
2590 
2591  /* determine if the parameters for this input changed */
2592  switch (ifp->type) {
2593  case AVMEDIA_TYPE_AUDIO:
2594  if (ifp->format != frame->format ||
2595  ifp->sample_rate != frame->sample_rate ||
2596  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2597  need_reinit |= AUDIO_CHANGED;
2598  break;
2599  case AVMEDIA_TYPE_VIDEO:
2600  if (ifp->format != frame->format ||
2601  ifp->width != frame->width ||
2602  ifp->height != frame->height ||
2603  ifp->color_space != frame->colorspace ||
2604  ifp->color_range != frame->color_range)
2605  need_reinit |= VIDEO_CHANGED;
2606  break;
2607  }
2608 
// NOTE(review): line 2609 missing — presumably fetches the display-matrix
// side data into sd and opens "if (sd) {"; confirm upstream.
2610  if (!ifp->displaymatrix_present ||
2611  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2612  need_reinit |= MATRIX_CHANGED;
2613  } else if (ifp->displaymatrix_present)
2614  need_reinit |= MATRIX_CHANGED;
2615 
// reinit is suppressed unless explicitly enabled, once a graph exists
2616  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2617  need_reinit = 0;
2618 
// hwaccel context appearing/disappearing or changing always forces reinit
2619  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2620  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2621  need_reinit |= HWACCEL_CHANGED;
2622 
2623  if (need_reinit) {
// NOTE(review): line 2624 missing — the call that sets ret (presumably
// ifilter_parameters_from_frame(ifilter, frame)); confirm upstream.
2625  if (ret < 0)
2626  return ret;
2627  }
2628 
2629  /* (re)init the graph if possible, otherwise buffer the frame and return */
2630  if (need_reinit || !fgt->graph) {
2631  AVFrame *tmp = av_frame_alloc();
2632 
2633  if (!tmp)
2634  return AVERROR(ENOMEM);
2635 
2636  if (!ifilter_has_all_input_formats(fg)) {
// NOTE(review): line 2637 missing — presumably moves the frame into tmp
// before queuing (e.g. av_frame_move_ref(tmp, frame)); confirm upstream.
2638 
2639  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2640  if (ret < 0)
2641  av_frame_free(&tmp);
2642 
2643  return ret;
2644  }
2645 
// flush frames already buffered in the old graph before tearing it down
2646  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2647  av_frame_free(&tmp);
2648  if (ret < 0)
2649  return ret;
2650 
2651  if (fgt->graph) {
2652  AVBPrint reason;
// NOTE(review): line 2653 missing — presumably av_bprint_init(&reason, ...);
// confirm upstream.
2654  if (need_reinit & AUDIO_CHANGED) {
2655  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2656  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2657  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2658  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2659  }
2660  if (need_reinit & VIDEO_CHANGED) {
2661  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2662  const char *color_space_name = av_color_space_name(frame->colorspace);
2663  const char *color_range_name = av_color_range_name(frame->color_range);
2664  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2665  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2666  unknown_if_null(color_space_name), frame->width, frame->height);
2667  }
2668  if (need_reinit & MATRIX_CHANGED)
2669  av_bprintf(&reason, "display matrix changed, ");
2670  if (need_reinit & HWACCEL_CHANGED)
2671  av_bprintf(&reason, "hwaccel changed, ");
2672  if (reason.len > 1)
2673  reason.str[reason.len - 2] = '\0'; // remove last comma
2674  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2675  }
2676 
2677  ret = configure_filtergraph(fg, fgt);
2678  if (ret < 0) {
2679  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2680  return ret;
2681  }
2682  }
2683 
// rescale timestamps into the buffersrc's timebase
2684  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2685  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2686  frame->time_base = ifp->time_base;
2687 
2688  if (ifp->displaymatrix_applied)
// NOTE(review): line 2689 missing — the body of this if (presumably
// removing the display-matrix side data from the frame); confirm upstream.
2690 
2691  fd = frame_data(frame);
2692  if (!fd)
2693  return AVERROR(ENOMEM);
// NOTE(review): lines 2694 and 2696-2697 missing — presumably an
// assignment into fd and the av_buffersrc_add_frame*() call that sets ret;
// confirm upstream.
2695 
2698  if (ret < 0) {
// NOTE(review): line 2699 missing — presumably unrefs the frame on error;
// confirm upstream.
2700  if (ret != AVERROR_EOF)
2701  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2702  return ret;
2703  }
2704 
2705  return 0;
2706 }
2707 
// Build a short name for the filtering thread: "<media>f#<file>:<stream>"
// for a simple filtergraph, "fc<index>" for a complex one.
2708 static void fg_thread_set_name(const FilterGraph *fg)
2709 {
2710  char name[16];
2711  if (filtergraph_is_simple(fg)) {
2712  OutputStream *ost = fg->outputs[0]->ost;
2713  snprintf(name, sizeof(name), "%cf#%d:%d",
2714  av_get_media_type_string(ost->type)[0],
2715  ost->file->index, ost->index);
2716  } else {
2717  snprintf(name, sizeof(name), "fc%d", fg->index);
2718  }
2719 
// NOTE(review): line 2720 missing — presumably applies 'name' to the
// current thread (e.g. ff_thread_setname(name)); confirm upstream.
2721 }
2722 
// NOTE(review): the signature line (source line 2723,
// "static void fg_thread_uninit(FilterGraphThread *fgt)" per the Doxygen
// index) is missing from this extraction.
// Frees all per-thread filtering state and zeroes *fgt for safe reuse.
2724 {
2725  if (fgt->frame_queue_out) {
// drain and free every frame still queued for output
2726  AVFrame *frame;
2727  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2728  av_frame_free(&frame);
// NOTE(review): line 2729 missing — presumably frees the fifo itself
// (e.g. av_fifo_freep2(&fgt->frame_queue_out)); confirm upstream.
2730  }
2731 
2732  av_frame_free(&fgt->frame);
2733  av_freep(&fgt->eof_in);
2734  av_freep(&fgt->eof_out);
2735 
2736  avfilter_graph_free(&fgt->graph);
2737 
2738  memset(fgt, 0, sizeof(*fgt));
2739 }
2740 
// Allocate per-thread filtering state (scratch frame, per-input/per-output
// EOF flags, output frame queue). On any failure everything allocated so
// far is released via fg_thread_uninit(). Returns 0 or AVERROR(ENOMEM).
2741 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2742 {
2743  memset(fgt, 0, sizeof(*fgt));
2744 
2745  fgt->frame = av_frame_alloc();
2746  if (!fgt->frame)
2747  goto fail;
2748 
2749  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2750  if (!fgt->eof_in)
2751  goto fail;
2752 
2753  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2754  if (!fgt->eof_out)
2755  goto fail;
2756 
// NOTE(review): line 2757 missing — presumably allocates
// fgt->frame_queue_out (e.g. av_fifo_alloc2(...)); confirm upstream.
2758  if (!fgt->frame_queue_out)
2759  goto fail;
2760 
2761  return 0;
2762 
2763 fail:
2764  fg_thread_uninit(fgt);
2765  return AVERROR(ENOMEM);
2766 }
2767 
// Main loop of a filtergraph worker thread: receives frames/EOF/commands
// from the scheduler, feeds them into the graph, and forwards filtered
// frames to consumers until all inputs and outputs are finished.
2768 static int filter_thread(void *arg)
2769 {
2770  FilterGraphPriv *fgp = arg;
2771  FilterGraph *fg = &fgp->fg;
2772 
2773  FilterGraphThread fgt;
2774  int ret = 0, input_status = 0;
2775 
2776  ret = fg_thread_init(&fgt, fg);
2777  if (ret < 0)
2778  goto finish;
2779 
2780  fg_thread_set_name(fg);
2781 
2782  // if we have all input parameters the graph can now be configured
// NOTE(review): line 2783 missing — presumably the condition guarding
// this configure (e.g. "if (ifilter_has_all_input_formats(fg)) {");
// the stray closing brace below (line 2790) matches it. Confirm upstream.
2784  ret = configure_filtergraph(fg, &fgt);
2785  if (ret < 0) {
2786  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2787  av_err2str(ret));
2788  goto finish;
2789  }
2790  }
2791 
2792  while (1) {
2793  InputFilter *ifilter;
2794  InputFilterPriv *ifp;
2795  enum FrameOpaque o;
2796  unsigned input_idx = fgt.next_in;
2797 
2798  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2799  &input_idx, fgt.frame);
2800  if (input_status == AVERROR_EOF) {
2801  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2802  break;
2803  } else if (input_status == AVERROR(EAGAIN)) {
2804  // should only happen when we didn't request any input
2805  av_assert0(input_idx == fg->nb_inputs);
2806  goto read_frames;
2807  }
2808  av_assert0(input_status >= 0);
2809 
2810  o = (intptr_t)fgt.frame->opaque;
2811 
// NOTE(review): the line below duplicates the assignment above (source
// lines 2810 and 2812 are identical); harmless, but one copy should be
// removed — confirm against upstream before deleting.
2812  o = (intptr_t)fgt.frame->opaque;
2813 
2814  // message on the control stream
2815  if (input_idx == fg->nb_inputs) {
2816  FilterCommand *fc;
2817 
2818  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2819 
2820  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2821  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2822  fc->all_filters);
2823  av_frame_unref(fgt.frame);
2824  continue;
2825  }
2826 
2827  // we received an input frame or EOF
2828  ifilter = fg->inputs[input_idx];
2829  ifp = ifp_from_ifilter(ifilter);
2830 
2831  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2832  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2833  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2834  !fgt.graph);
2835  } else if (fgt.frame->buf[0]) {
2836  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2837  } else {
// NOTE(review): line 2838 missing — likely an assertion on the opaque
// value for the EOF case; confirm upstream.
2839  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2840  }
2841  av_frame_unref(fgt.frame);
2842  if (ret == AVERROR_EOF) {
2843  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2844  input_idx);
2845  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2846  continue;
2847  }
2848  if (ret < 0)
2849  goto finish;
2850 
2851 read_frames:
2852  // retrieve all newly available frames
2853  ret = read_frames(fg, &fgt, fgt.frame);
2854  if (ret == AVERROR_EOF) {
2855  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
2856  break;
2857  } else if (ret < 0) {
2858  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
2859  av_err2str(ret));
2860  goto finish;
2861  }
2862  }
2863 
// flush the outputs that have not seen EOF yet
2864  for (unsigned i = 0; i < fg->nb_outputs; i++) {
// NOTE(review): line 2865 missing — presumably declares
// OutputFilterPriv *ofp for fg->outputs[i]; confirm upstream.
2866 
2867  if (fgt.eof_out[i] || !fgt.graph)
2868  continue;
2869 
2870  ret = fg_output_frame(ofp, &fgt, NULL);
2871  if (ret < 0)
2872  goto finish;
2873  }
2874 
2875 finish:
2876  // EOF is normal termination
2877  if (ret == AVERROR_EOF)
2878  ret = 0;
2879 
2880  fg_thread_uninit(&fgt);
2881 
2882  return ret;
2883 }
2884 
2885 void fg_send_command(FilterGraph *fg, double time, const char *target,
2886  const char *command, const char *arg, int all_filters)
2887 {
2888  FilterGraphPriv *fgp = fgp_from_fg(fg);
2889  AVBufferRef *buf;
2890  FilterCommand *fc;
2891 
2892  fc = av_mallocz(sizeof(*fc));
2893  if (!fc)
2894  return;
2895 
2896  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
2897  if (!buf) {
2898  av_freep(&fc);
2899  return;
2900  }
2901 
2902  fc->target = av_strdup(target);
2903  fc->command = av_strdup(command);
2904  fc->arg = av_strdup(arg);
2905  if (!fc->target || !fc->command || !fc->arg) {
2906  av_buffer_unref(&buf);
2907  return;
2908  }
2909 
2910  fc->time = time;
2911  fc->all_filters = all_filters;
2912 
2913  fgp->frame->buf[0] = buf;
2914  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
2915 
2916  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
2917 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:522
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:122
AVSubtitle
Definition: avcodec.h:2227
formats
formats
Definition: signature.h:48
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
Definition: ffmpeg_filter.c:726
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1650
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:94
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:693
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:654
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:198
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:615
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:119
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:273
av_clip
#define av_clip
Definition: common.h:98
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:120
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2437
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:190
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:126
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2232
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1908
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1110
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:64
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:68
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:96
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:839
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:137
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:972
FrameData
Definition: ffmpeg.h:593
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1883
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:152
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:288
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:858
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:110
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:1081
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:290
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1842
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2575
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:68
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:139
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:228
AVSubtitleRect
Definition: avcodec.h:2200
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2231
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:893
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:172
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:586
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:523
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:665
InputFile::index
int index
Definition: ffmpeg.h:392
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:967
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:487
AVFrame::width
int width
Definition: frame.h:447
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:49
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:56
AVOption
AVOption.
Definition: opt.h:346
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2224
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:174
FilterGraph::index
int index
Definition: ffmpeg.h:288
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:136
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:176
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:215
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1660
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:464
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:291
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2572
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:647
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:220
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:308
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:239
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:588
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:322
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:410
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2307
FilterGraphPriv
Definition: ffmpeg_filter.c:45
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:590
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:99
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1695
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:355
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:583
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:347
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:82
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:76
fg_finalise_bindings
int fg_finalise_bindings(FilterGraph *fg)
Definition: ffmpeg_filter.c:1206
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:245
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:261
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:164
finish
static void finish(void)
Definition: movenc.c:342
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:241
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3338
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:185
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2723
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:298
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:245
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:604
val
static double val(void *priv, double ch)
Definition: aeval.c:78
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:188
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:776
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:116
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1506
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:643
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:740
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1676
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
FrameData::tb
AVRational tb
Definition: ffmpeg.h:603
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:72
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:195
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:177
AVRational::num
int num
Numerator.
Definition: rational.h:59
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:246
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:862
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2447
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:982
OutputFile::shortest
int shortest
Definition: ffmpeg.h:588
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:606
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2516
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:103
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1122
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:626
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:865
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1143
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:234
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:66
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:113
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:292
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:628
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1305
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:278
InputFilter
Definition: ffmpeg.h:266
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:59
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:272
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, InputFilterOptions *opts)
Definition: ffmpeg_demux.c:983
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:48
nb_streams
static int nb_streams
Definition: ffprobe.c:383
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2233
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2741
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:248
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:536
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:267
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:217
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:944
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1185
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:210
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:194
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3278
AVFormatContext
Format I/O context.
Definition: avformat.h:1255
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:629
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:766
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:274
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1243
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:213
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:133
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:806
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:196
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1075
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:174
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:355
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts)
Definition: ffmpeg_dec.c:1373
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:159
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:145
Decoder
Definition: ffmpeg.h:333
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1086
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1144
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:815
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:117
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:481
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:205
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2174
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:91
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:645
mathops.h
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1385
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:69
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:610
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1373
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1167
AVFilterGraph
Definition: avfilter.h:813
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:244
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:132
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:649
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:293
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:209
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:791
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:363
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:125
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:582
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:263
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:46
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:186
FilterGraph
Definition: ffmpeg.h:286
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1156
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1128
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:257
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:964
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:278
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:818
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1877
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1952
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:240
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
configure_output_filter
static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1480
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2768
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:143
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:83
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:147
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:84
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:961
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:574
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:203
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:329
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2009
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:134
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2574
FilterCommand::time
double time
Definition: ffmpeg_filter.c:230
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1232
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:156
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:146
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:560
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1291
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:502
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:433
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:462
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:57
frame_data
FrameData * frame_data(AVFrame *frame)
Get our auxiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:473
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2230
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:100
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:61
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1100
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:924
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:131
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1669
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2573
SCH_DEC
#define SCH_DEC(decoder)
Definition: ffmpeg_sched.h:113
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2372
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2578
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:258
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: defs.h:61
decoders
Decoder ** decoders
Definition: ffmpeg.c:134
nb_decoders
int nb_decoders
Definition: ffmpeg.c:135
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:280
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2385
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:800
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2183
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:410
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2583
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:944
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:830
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:231
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:182
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:168
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
FPSConvContext
Definition: ffmpeg_filter.c:165
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:108
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:608
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2885
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:51
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:263
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:193
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1609
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
display.h
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:180
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
InputFilterPriv::sub2video
struct InputFilterPriv::@6 sub2video
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:394
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:633
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:77
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:125
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:766
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:131
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:327
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:960
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2474
AVFilter
Filter definition.
Definition: avfilter.h:166
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2051
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:160
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:958
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:743
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:268
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_opt_eval_int
int av_opt_eval_int(void *obj, const AVOption *o, const char *val, int *int_out)
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:774
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:166
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:408
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1283
AVFilterParams::opts
AVDictionary * opts
Options to be applied to the filter.
Definition: avfilter.h:1127
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:216
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:413
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2571
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:362
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:482
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:977
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:447
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:749
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:384
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:616
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:126
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:351
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1142
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:141
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:123
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:432
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:54
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:241
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:611
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:1932
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:211
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:267
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1168
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:287
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:439
OutputFilter
Definition: ffmpeg.h:271
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2469
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:104
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:75
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:313
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:490
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:283
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:85
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
OutputStream::is_cfr
int is_cfr
Definition: ffmpeg.h:527
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
OutputStream::autoscale
int autoscale
Definition: ffmpeg.h:532
InputStream::index
int index
Definition: ffmpeg.h:353
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2416
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:226
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:951
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:336
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:129
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:153
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:132
d
d
Definition: ffmpeg_filter.c:410
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:313
imgutils.h
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:615
OutputStream::st
AVStream * st
Definition: mux.c:54
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:193
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1261
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:106
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:63
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec)
Definition: ffmpeg_filter.c:698
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:300
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:816
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:410
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:648
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:585
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:479
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:200
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:956
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:194
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:119
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:179
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2708
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:155
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1495
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2229
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:227
FilterCommand
Definition: ffmpeg_filter.c:225
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:131
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2882
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:282
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:83
OutputFile
Definition: ffmpeg.h:574
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:929
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:255
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:181