FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "graph/graphprint.h"
25 
26 #include "libavfilter/avfilter.h"
27 #include "libavfilter/buffersink.h"
28 #include "libavfilter/buffersrc.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
34 #include "libavutil/downmix_info.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/time.h"
41 #include "libavutil/timestamp.h"
42 
43 // FIXME private header, used for mid_pred()
44 #include "libavcodec/mathops.h"
45 
/* Per-filtergraph private state wrapping the public FilterGraph.
 * NOTE(review): the doc-page extraction dropped several members here
 * (embedded line numbers 47, 57-58, 62, 65, 67, 69, 71 are missing —
 * presumably the FilterGraph base member, have_sources flag, Scheduler
 * pointer, AVFrame *frame / *frame_enc, etc.) and the fgp_from_fg()
 * signature (line 73). Recover the full struct from the repository
 * before editing. The casts below rely on FilterGraph being the first
 * member of FilterGraphPriv. */
46 typedef struct FilterGraphPriv {
48 
49  // name used for logging
50  char log_name[32];
51 
52  int is_simple;
53  // true when the filtergraph contains only meta filters
54  // that do not modify the frame data
55  int is_meta;
56  // source filters are present in the graph
59 
60  unsigned nb_outputs_done;
61 
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
70  unsigned sch_idx;
72 
/* Downcast FilterGraph* to its enclosing FilterGraphPriv*
 * (signature line lost in extraction — presumably
 * static FilterGraphPriv *fgp_from_fg(FilterGraph *fg)). */
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
/* const variant of the downcast above. */
78 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
79 {
80  return (const FilterGraphPriv*)fg;
81 }
82 
/* NOTE(review): both structs below lost members in extraction
 * (non-contiguous embedded line numbers). Recover from the repository
 * before editing; the visible members are kept byte-identical. */
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
/* Per-input private state wrapping the public InputFilter; the cast in
 * ifp_from_ifilter() relies on InputFilter being the first member. */
104 typedef struct InputFilterPriv {
106 
108 
109  // used to hold submitted input
111 
112  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
113  // same as type otherwise
115 
116  int eof;
117  int bound;
119  uint64_t nb_dropped;
120 
121  // parameters configured for this input
122  int format;
123 
124  int width, height;
129 
132 
134 
137 
139 
141 
145 
148 
149  struct {
150  AVFrame *frame;
151 
154 
155  /// marks if sub2video_update should force an initialization
156  unsigned int initialize;
157  } sub2video;
159 
/* Downcast InputFilter* to its enclosing InputFilterPriv*
 * (signature line 160 lost in extraction). */
161 {
162  return (InputFilterPriv*)ifilter;
163 }
164 
/* NOTE(review): FPSConvContext and OutputFilterPriv below lost many
 * members in extraction (gaps in embedded line numbers). Recover them
 * from the repository before editing. */
165 typedef struct FPSConvContext {
167  /* number of frames emitted by the video-encoding sync code */
169  /* history of nb_frames_prev, i.e. the number of times the
170  * previous frame was duplicated by vsync code in recent
171  * do_video_out() calls */
173 
174  uint64_t dup_warning;
175 
178 
180 
186 
/* Per-output private state wrapping the public OutputFilter; the cast
 * in ofp_from_ofilter() relies on OutputFilter being the first member. */
187 typedef struct OutputFilterPriv {
189 
190  void *log_parent;
191  char log_name[32];
192 
193  /* desired output stream properties */
194  int format;
195  int width, height;
201 
204 
205  // time base in which the output is sent to our downstream
206  // does not need to match the filtersink's timebase
208  // at least one frame with the above timebase was sent
209  // to our downstream, so it cannot change anymore
211 
213 
216 
217  // those are only set if no format is specified and the encoder gives us multiple options
218  // They point directly to the relevant lists of the encoder.
219  const int *formats;
221  const int *sample_rates;
225 
229  // offset for output timestamps, in AV_TIME_BASE_Q
233 
234  unsigned flags;
236 
/* Downcast OutputFilter* to its enclosing OutputFilterPriv*
 * (signature line 237 lost in extraction). */
238 {
239  return (OutputFilterPriv*)ofilter;
240 }
241 
/* A queued filter command (target filter, command name, argument),
 * delivered to the filter thread inside an AVBufferRef. */
242 typedef struct FilterCommand {
243  char *target;
244  char *command;
245  char *arg;
246 
247  double time;
249 } FilterCommand;
250 
251 static void filter_command_free(void *opaque, uint8_t *data)
252 {
254 
255  av_freep(&fc->target);
256  av_freep(&fc->command);
257  av_freep(&fc->arg);
258 
259  av_free(data);
260 }
261 
/* Prepare ifp->sub2video.frame as a blank canvas with the input's
 * configured geometry/format and zeroed pixel data.
 * NOTE(review): extraction dropped the signature (orig line 262) and
 * two statements (orig lines 267 and 276 — 'ret' is tested below but
 * never visibly assigned; presumably an unref plus a buffer-allocation
 * call). Recover from the repository before editing. */
263 {
264  AVFrame *frame = ifp->sub2video.frame;
265  int ret;
266 
268 
269  frame->width = ifp->width;
270  frame->height = ifp->height;
271  frame->format = ifp->format;
272  frame->colorspace = ifp->color_space;
273  frame->color_range = ifp->color_range;
274  frame->alpha_mode = ifp->alpha_mode;
275 
277  if (ret < 0)
278  return ret;
279 
280  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
281 
282  return 0;
283 }
284 
285 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
286  AVSubtitleRect *r)
287 {
288  uint32_t *pal, *dst2;
289  uint8_t *src, *src2;
290  int x, y;
291 
292  if (r->type != SUBTITLE_BITMAP) {
293  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
294  return;
295  }
296  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
297  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
298  r->x, r->y, r->w, r->h, w, h
299  );
300  return;
301  }
302 
303  dst += r->y * dst_linesize + r->x * 4;
304  src = r->data[0];
305  pal = (uint32_t *)r->data[1];
306  for (y = 0; y < r->h; y++) {
307  dst2 = (uint32_t *)dst;
308  src2 = src;
309  for (x = 0; x < r->w; x++)
310  *(dst2++) = pal[*(src2++)];
311  dst += dst_linesize;
312  src += r->linesize[0];
313  }
314 }
315 
/* Send the current sub2video canvas frame into the buffer source at
 * the given pts, remembering that pts as last_pts.
 * NOTE(review): extraction dropped the signature (orig line 316), the
 * statements at orig lines 323-325 ('ret' is tested but never visibly
 * assigned — presumably the av_buffersrc_add_frame call), and the
 * av_log call line (orig 327) whose string arguments remain below.
 * Recover from the repository before editing. */
317 {
318  AVFrame *frame = ifp->sub2video.frame;
319  int ret;
320 
321  av_assert1(frame->data[0]);
322  ifp->sub2video.last_pts = frame->pts = pts;
326  if (ret != AVERROR_EOF && ret < 0)
328  "Error while add the frame to buffer source(%s).\n",
329  av_err2str(ret));
330 }
331 
332 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
333  const AVSubtitle *sub)
334 {
335  AVFrame *frame = ifp->sub2video.frame;
336  int8_t *dst;
337  int dst_linesize;
338  int num_rects;
339  int64_t pts, end_pts;
340 
341  if (sub) {
342  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
343  AV_TIME_BASE_Q, ifp->time_base);
344  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
345  AV_TIME_BASE_Q, ifp->time_base);
346  num_rects = sub->num_rects;
347  } else {
348  /* If we are initializing the system, utilize current heartbeat
349  PTS as the start time, and show until the following subpicture
350  is received. Otherwise, utilize the previous subpicture's end time
351  as the fall-back value. */
352  pts = ifp->sub2video.initialize ?
353  heartbeat_pts : ifp->sub2video.end_pts;
354  end_pts = INT64_MAX;
355  num_rects = 0;
356  }
357  if (sub2video_get_blank_frame(ifp) < 0) {
359  "Impossible to get a blank canvas.\n");
360  return;
361  }
362  dst = frame->data [0];
363  dst_linesize = frame->linesize[0];
364  for (int i = 0; i < num_rects; i++)
365  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
366  sub2video_push_ref(ifp, pts);
367  ifp->sub2video.end_pts = end_pts;
368  ifp->sub2video.initialize = 0;
369 }
370 
371 /* Define a function for appending a list of allowed formats
372  * to an AVBPrint. If nonempty, the list will have a header. */
373 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
374 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
375 { \
376  if (ofp->var == none && !ofp->supported_list) \
377  return; \
378  av_bprintf(bprint, #name "="); \
379  if (ofp->var != none) { \
380  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
381  } else { \
382  const type *p; \
383  \
384  for (p = ofp->supported_list; *p != none; p++) { \
385  av_bprintf(bprint, printf_format "|", get_name(*p)); \
386  } \
387  if (bprint->len > 0) \
388  bprint->str[--bprint->len] = '\0'; \
389  } \
390  av_bprint_chars(bprint, ':', 1); \
391 }
392 
/* NOTE(review): the DEF_CHOOSE_FORMAT instantiations below (for pixel
 * formats, sample formats, sample rates, color spaces, color ranges and
 * alpha modes, judging by the surviving fragments) lost lines in the
 * doc extraction — recover the full invocations from the repository. */
395 
398 
400  "%d", )
401 
402 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
404 
405 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
407 
408 DEF_CHOOSE_FORMAT(alpha_modes, enum AVAlphaMode, alpha_mode, alpha_modes,
410 
411 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
412 {
413  if (av_channel_layout_check(&ofp->ch_layout)) {
414  av_bprintf(bprint, "channel_layouts=");
415  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
416  } else if (ofp->ch_layouts) {
417  const AVChannelLayout *p;
418 
419  av_bprintf(bprint, "channel_layouts=");
420  for (p = ofp->ch_layouts; p->nb_channels; p++) {
422  av_bprintf(bprint, "|");
423  }
424  if (bprint->len > 0)
425  bprint->str[--bprint->len] = '\0';
426  } else
427  return;
428  av_bprint_chars(bprint, ':', 1);
429 }
430 
431 static int read_binary(void *logctx, const char *path,
432  uint8_t **data, int *len)
433 {
434  AVIOContext *io = NULL;
435  int64_t fsize;
436  int ret;
437 
438  *data = NULL;
439  *len = 0;
440 
441  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
442  if (ret < 0) {
443  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
444  path, av_err2str(ret));
445  return ret;
446  }
447 
448  fsize = avio_size(io);
449  if (fsize < 0 || fsize > INT_MAX) {
450  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
451  ret = AVERROR(EIO);
452  goto fail;
453  }
454 
455  *data = av_malloc(fsize);
456  if (!*data) {
457  ret = AVERROR(ENOMEM);
458  goto fail;
459  }
460 
461  ret = avio_read(io, *data, fsize);
462  if (ret != fsize) {
463  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
464  ret = ret < 0 ? ret : AVERROR(EIO);
465  goto fail;
466  }
467 
468  *len = fsize;
469 
470  ret = 0;
471 fail:
472  avio_close(io);
473  if (ret < 0) {
474  av_freep(data);
475  *len = 0;
476  }
477  return ret;
478 }
479 
/* Apply a single key=value option to a filter context. A key prefixed
 * with '/' names a valid option whose value is a path: the actual
 * option value is then loaded from that file (binary or text,
 * depending on the option type).
 * NOTE(review): extraction dropped the statements at orig lines 486
 * ('ret' is tested right below but never visibly assigned — presumably
 * the direct av_opt_set-style call), 491 (assignment of 'o' —
 * presumably an av_opt_find lookup of key+1), and 507/516 (the calls
 * applying the loaded file contents before av_freep). Recover from the
 * repository before editing. */
480 static int filter_opt_apply(void *logctx, AVFilterContext *f,
481  const char *key, const char *val)
482 {
483  const AVOption *o = NULL;
484  int ret;
485 
487  if (ret >= 0)
488  return 0;
489 
490  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
492  if (!o)
493  goto err_apply;
494 
495  // key is a valid option name prefixed with '/'
496  // interpret value as a path from which to load the actual option value
497  key++;
498 
499  if (o->type == AV_OPT_TYPE_BINARY) {
500  uint8_t *data;
501  int len;
502 
503  ret = read_binary(logctx, val, &data, &len);
504  if (ret < 0)
505  goto err_load;
506 
508  av_freep(&data);
509  } else {
510  char *data = file_read(val);
511  if (!data) {
512  ret = AVERROR(EIO);
513  goto err_load;
514  }
515 
517  av_freep(&data);
518  }
519  if (ret < 0)
520  goto err_apply;
521 
522  return 0;
523 
524 err_apply:
525  av_log(logctx, AV_LOG_ERROR,
526  "Error applying option '%s' to filter '%s': %s\n",
527  key, f->filter->name, av_err2str(ret));
528  return ret;
529 err_load:
530  av_log(logctx, AV_LOG_ERROR,
531  "Error loading value for option '%s' from file '%s'\n",
532  key, val);
533  return ret;
534 }
535 
536 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
537 {
538  for (size_t i = 0; i < seg->nb_chains; i++) {
539  AVFilterChain *ch = seg->chains[i];
540 
541  for (size_t j = 0; j < ch->nb_filters; j++) {
542  AVFilterParams *p = ch->filters[j];
543  const AVDictionaryEntry *e = NULL;
544 
545  av_assert0(p->filter);
546 
547  while ((e = av_dict_iterate(p->opts, e))) {
548  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
549  if (ret < 0)
550  return ret;
551  }
552 
553  av_dict_free(&p->opts);
554  }
555  }
556 
557  return 0;
558 }
559 
/* Parse a filtergraph description into 'graph', attach the hw device
 * to all hw-capable filters, apply deferred per-filter options, and
 * return the dangling inputs/outputs of the graph.
 * NOTE(review): extraction dropped parts of this function — the
 * parameter line at orig 562 (presumably AVFilterInOut **inputs,
 * **outputs, which are used below), the segment variable declaration
 * (565), and the statements at orig 575, 597 and 600 ('ret' tested at
 * 576 without visible assignment; presumably segment create/apply and
 * segment free calls). Recover from the repository before editing. */
560 static int graph_parse(void *logctx,
561  AVFilterGraph *graph, const char *desc,
563  AVBufferRef *hw_device)
564 {
566  int ret;
567 
568  *inputs = NULL;
569  *outputs = NULL;
570 
571  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
572  if (ret < 0)
573  return ret;
574 
576  if (ret < 0)
577  goto fail;
578 
579  if (hw_device) {
580  for (int i = 0; i < graph->nb_filters; i++) {
581  AVFilterContext *f = graph->filters[i];
582 
583  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
584  continue;
585  f->hw_device_ctx = av_buffer_ref(hw_device);
586  if (!f->hw_device_ctx) {
587  ret = AVERROR(ENOMEM);
588  goto fail;
589  }
590  }
591  }
592 
593  ret = graph_opts_apply(logctx, seg);
594  if (ret < 0)
595  goto fail;
596 
598 
599 fail:
601  return ret;
602 }
603 
604 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every input of the graph has a negotiated format,
 * 0 otherwise.
 * NOTE(review): the signature (orig line 605) and the per-input priv
 * lookup (orig 608, assigning 'ifp') were lost in extraction — recover
 * from the repository before editing. */
606 {
607  for (int i = 0; i < fg->nb_inputs; i++) {
609  if (ifp->format < 0)
610  return 0;
611  }
612  return 1;
613 }
614 
615 static int filter_thread(void *arg);
616 
617 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
618 {
619  AVFilterContext *ctx = inout->filter_ctx;
620  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
621  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
622 
623  if (nb_pads > 1)
624  return av_strdup(ctx->filter->name);
625  return av_asprintf("%s:%s", ctx->filter->name,
626  avfilter_pad_get_name(pads, inout->pad_idx));
627 }
628 
629 static const char *ofilter_item_name(void *obj)
630 {
631  OutputFilterPriv *ofp = obj;
632  return ofp->log_name;
633 }
634 
/* AVClass for OutputFilterPriv logging; log_parent (set per-output)
 * provides the parent log context. */
635 static const AVClass ofilter_class = {
636  .class_name = "OutputFilter",
637  .version = LIBAVUTIL_VERSION_INT,
638  .item_name = ofilter_item_name,
639  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
640  .category = AV_CLASS_CATEGORY_FILTER,
641 };
642 
/* Append a new output filter of the given media type to fg->outputs
 * and initialize its logging name. Returns the public OutputFilter,
 * or NULL on allocation failure.
 * NOTE(review): the signature (orig line 643 — presumably
 * static OutputFilter *ofilter_alloc(FilterGraph *fg, enum AVMediaType
 * type)) and the field initializations at orig 658-660 were lost in
 * extraction. Recover from the repository before editing. */
644 {
645  OutputFilterPriv *ofp;
646  OutputFilter *ofilter;
647 
648  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
649  if (!ofp)
650  return NULL;
651 
652  ofilter = &ofp->ofilter;
653  ofilter->class = &ofilter_class;
654  ofp->log_parent = fg;
655  ofilter->graph = fg;
656  ofilter->type = type;
657  ofp->format = -1;
661  ofilter->index = fg->nb_outputs - 1;
662 
663  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
664  av_get_media_type_string(type)[0], ofilter->index);
665 
666  return ofilter;
667 }
668 
/* Bind a filtergraph input to a demuxed input stream: validates media
 * types (allowing the subtitle->video sub2video case), registers the
 * filter with the stream, connects it in the scheduler, and sets up
 * the sub2video canvas when the source is a subtitle stream.
 * NOTE(review): extraction dropped the declaration at orig 674
 * (presumably the SchedulerNode 'src' used below) and the av_log
 * argument line at orig 683. Recover from the repository before
 * editing. */
669 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
670  const ViewSpecifier *vs)
671 {
672  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
673  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
675  int ret;
676 
677  av_assert0(!ifp->bound);
678  ifp->bound = 1;
679 
680  if (ifilter->type != ist->par->codec_type &&
681  !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
682  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
684  return AVERROR(EINVAL);
685  }
686 
687  ifp->type_src = ist->st->codecpar->codec_type;
688 
689  ifp->opts.fallback = av_frame_alloc();
690  if (!ifp->opts.fallback)
691  return AVERROR(ENOMEM);
692 
693  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
694  vs, &ifp->opts, &src);
695  if (ret < 0)
696  return ret;
697 
698  ifilter->input_name = av_strdup(ifp->opts.name);
699  if (!ifilter->input_name)
700  return AVERROR(EINVAL);
701 
702  ret = sch_connect(fgp->sch,
703  src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index));
704  if (ret < 0)
705  return ret;
706 
707  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
708  ifp->sub2video.frame = av_frame_alloc();
709  if (!ifp->sub2video.frame)
710  return AVERROR(ENOMEM);
711 
712  ifp->width = ifp->opts.sub2video_width;
713  ifp->height = ifp->opts.sub2video_height;
714 
715  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
716  palettes for all rectangles are identical or compatible */
717  ifp->format = AV_PIX_FMT_RGB32;
718 
719  ifp->time_base = AV_TIME_BASE_Q;
720 
721  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
722  ifp->width, ifp->height);
723  }
724 
725  return 0;
726 }
727 
/* Bind a filtergraph input to a standalone decoder: validates media
 * types, registers the filter with the decoder, and connects it in the
 * scheduler.
 * NOTE(review): extraction dropped the first signature line (orig 728
 * — presumably static int ifilter_bind_dec(InputFilterPriv *ifp,
 * Decoder *dec, const ViewSpecifier *vs)), the declarations at orig
 * 731-732 ('fgp' and 'src' are used below), and the av_log argument
 * line at orig 740. Recover from the repository before editing. */
729  const ViewSpecifier *vs)
730 {
733  int ret;
734 
735  av_assert0(!ifp->bound);
736  ifp->bound = 1;
737 
738  if (ifp->ifilter.type != dec->type) {
739  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
741  return AVERROR(EINVAL);
742  }
743 
744  ifp->type_src = ifp->ifilter.type;
745 
746  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
747  if (ret < 0)
748  return ret;
749 
750  ifp->ifilter.input_name = av_strdup(ifp->opts.name);
751  if (!ifp->ifilter.input_name)
752  return AVERROR(EINVAL);
753 
754  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
755  if (ret < 0)
756  return ret;
757 
758  return 0;
759 }
760 
761 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
762  const AVChannelLayout *layout_requested)
763 {
764  int i, err;
765 
766  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
767  /* Pass the layout through for all orders but UNSPEC */
768  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
769  if (err < 0)
770  return err;
771  return 0;
772  }
773 
774  /* Requested layout is of order UNSPEC */
775  if (!layouts_allowed) {
776  /* Use the default native layout for the requested amount of channels when the
777  encoder doesn't have a list of supported layouts */
778  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
779  return 0;
780  }
781  /* Encoder has a list of supported layouts. Pick the first layout in it with the
782  same amount of channels as the requested layout */
783  for (i = 0; layouts_allowed[i].nb_channels; i++) {
784  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
785  break;
786  }
787  if (layouts_allowed[i].nb_channels) {
788  /* Use it if one is found */
789  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
790  if (err < 0)
791  return err;
792  return 0;
793  }
794  /* If no layout for the amount of channels requested was found, use the default
795  native layout for it. */
796  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
797 
798  return 0;
799 }
800 
/* Bind a filtergraph output to an encoder: copies the desired output
 * stream properties (format/size/rates/layout/color metadata, fps and
 * trim options, sws/swr option dicts) from 'opts' into the output
 * filter priv, sets up its logging name, and connects the output to
 * the encoder in the scheduler. Returns 0 or a negative AVERROR code.
 * NOTE(review): extraction dropped orig lines 869-870 in the video
 * branch (between the alpha_mode handling and the fps setup) — recover
 * from the repository before editing. */
801 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
802  const OutputFilterOptions *opts)
803 {
804  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
805  FilterGraph *fg = ofilter->graph;
806  FilterGraphPriv *fgp = fgp_from_fg(fg);
807  int ret;
808 
809  av_assert0(!ofilter->bound);
810  av_assert0(!opts->enc ||
811  ofilter->type == opts->enc->type);
812 
813  ofilter->bound = 1;
814  av_freep(&ofilter->linklabel);
815 
816  ofp->flags = opts->flags;
817  ofp->ts_offset = opts->ts_offset;
818  ofp->enc_timebase = opts->output_tb;
819 
820  ofp->trim_start_us = opts->trim_start_us;
821  ofp->trim_duration_us = opts->trim_duration_us;
822 
823  ofilter->output_name = av_strdup(opts->name);
824  if (!ofilter->output_name)
825  return AVERROR(EINVAL);
826 
827  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
828  if (ret < 0)
829  return ret;
830 
831  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
832  if (ret < 0)
833  return ret;
834 
835  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
836  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
837 
838  if (fgp->is_simple) {
839  // for simple filtergraph there is just one output,
840  // so use only graph-level information for logging
841  ofp->log_parent = NULL;
842  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
843  } else
844  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
845 
846  switch (ofilter->type) {
847  case AVMEDIA_TYPE_VIDEO:
848  ofp->width = opts->width;
849  ofp->height = opts->height;
850  if (opts->format != AV_PIX_FMT_NONE) {
851  ofp->format = opts->format;
852  } else
853  ofp->formats = opts->formats;
854 
855  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
856  ofp->color_space = opts->color_space;
857  else
858  ofp->color_spaces = opts->color_spaces;
859 
860  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
861  ofp->color_range = opts->color_range;
862  else
863  ofp->color_ranges = opts->color_ranges;
864 
865  if (opts->alpha_mode != AVALPHA_MODE_UNSPECIFIED)
866  ofp->alpha_mode = opts->alpha_mode;
867  else
868  ofp->alpha_modes = opts->alpha_modes;
869 
871 
872  ofp->fps.last_frame = av_frame_alloc();
873  if (!ofp->fps.last_frame)
874  return AVERROR(ENOMEM);
875 
876  ofp->fps.vsync_method = opts->vsync_method;
877  ofp->fps.framerate = opts->frame_rate;
878  ofp->fps.framerate_max = opts->max_frame_rate;
879  ofp->fps.framerate_supported = opts->frame_rates;
880 
881  // reduce frame rate for mpeg4 to be within the spec limits
882  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
883  ofp->fps.framerate_clip = 65535;
884 
885  ofp->fps.dup_warning = 1000;
886 
887  break;
888  case AVMEDIA_TYPE_AUDIO:
889  if (opts->format != AV_SAMPLE_FMT_NONE) {
890  ofp->format = opts->format;
891  } else {
892  ofp->formats = opts->formats;
893  }
894  if (opts->sample_rate) {
895  ofp->sample_rate = opts->sample_rate;
896  } else
897  ofp->sample_rates = opts->sample_rates;
898  if (opts->ch_layout.nb_channels) {
899  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
900  if (ret < 0)
901  return ret;
902  } else {
903  ofp->ch_layouts = opts->ch_layouts;
904  }
905  break;
906  }
907 
908  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index),
909  SCH_ENC(sched_idx_enc));
910  if (ret < 0)
911  return ret;
912 
913  return 0;
914 }
915 
/* Bind a filtergraph output to another filtergraph's input (used when
 * graphs are chained via link labels): marks it bound, names it, and
 * extends the logging name.
 * NOTE(review): the first signature line (orig 916 — presumably
 * static int ofilter_bind_ifilter(OutputFilter *ofilter,
 * InputFilterPriv *ifp, const OutputFilterOptions *opts)) was lost in
 * extraction. Recover from the repository before editing. */
917  const OutputFilterOptions *opts)
918 {
919  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
920 
921  av_assert0(!ofilter->bound);
922  av_assert0(ofilter->type == ifp->ifilter.type);
923 
924  ofilter->bound = 1;
925  av_freep(&ofilter->linklabel);
926 
927  ofilter->output_name = av_strdup(opts->name);
928  if (!ofilter->output_name)
929  return AVERROR(EINVAL);
930 
931  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
932 
933  return 0;
934 }
935 
/* Bind this graph's input to another filtergraph's output 'out_idx':
 * validates media types, names the link "fg:<src>:<idx>", binds the
 * source output via ofilter_bind_ifilter(), and connects the two
 * graphs in the scheduler.
 * NOTE(review): extraction dropped the declarations at orig 938/940
 * ('fgp' and 'opts' are used below) and the av_log argument line at
 * orig 950. Recover from the repository before editing. */
936 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
937 {
939  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
941  char name[32];
942  int ret;
943 
944  av_assert0(!ifp->bound);
945  ifp->bound = 1;
946 
947  if (ifp->ifilter.type != ofilter_src->type) {
948  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
949  av_get_media_type_string(ofilter_src->type),
951  return AVERROR(EINVAL);
952  }
953 
954  ifp->type_src = ifp->ifilter.type;
955 
956  memset(&opts, 0, sizeof(opts));
957 
958  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index);
959  opts.name = name;
960 
961  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
962  if (ret < 0)
963  return ret;
964 
965  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
966  SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
967  if (ret < 0)
968  return ret;
969 
970  return 0;
971 }
972 
/* Append a new input filter to fg->inputs and initialize it (scratch
 * frame, unset format, frame queue). Returns the public InputFilter,
 * or NULL on allocation failure.
 * NOTE(review): extraction dropped the signature (orig line 973 —
 * presumably static InputFilter *ifilter_alloc(FilterGraph *fg)), the
 * initializations at orig 991-993, and the frame_queue allocation at
 * orig 995 (it is NULL-checked right below). Recover from the
 * repository before editing. */
974 {
975  InputFilterPriv *ifp;
976  InputFilter *ifilter;
977 
978  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
979  if (!ifp)
980  return NULL;
981 
982  ifilter = &ifp->ifilter;
983  ifilter->graph = fg;
984 
985  ifp->frame = av_frame_alloc();
986  if (!ifp->frame)
987  return NULL;
988 
989  ifilter->index = fg->nb_inputs - 1;
990  ifp->format = -1;
994 
996  if (!ifp->frame_queue)
997  return NULL;
998 
999  return ifilter;
1000 }
1001 
/* Free a filtergraph and everything it owns: per-input queued frames,
 * sub2video/scratch frames, names and labels, per-output fps state and
 * option dicts, the graph description, and finally the struct itself.
 * *pfg is set to NULL via av_freep(pfg). NULL-safe.
 * NOTE(review): extraction dropped the signature (orig line 1002 —
 * presumably void fg_free(FilterGraph **pfg)) and the statements at
 * orig 1026, 1029 and 1047-1048 (additional per-input/per-output
 * cleanup). Recover from the repository before editing. */
1003 {
1004  FilterGraph *fg = *pfg;
1005  FilterGraphPriv *fgp;
1006 
1007  if (!fg)
1008  return;
1009  fgp = fgp_from_fg(fg);
1010 
1011  for (int j = 0; j < fg->nb_inputs; j++) {
1012  InputFilter *ifilter = fg->inputs[j];
1013  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1014 
1015  if (ifp->frame_queue) {
1016  AVFrame *frame;
1017  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1018  av_frame_free(&frame);
1019  av_fifo_freep2(&ifp->frame_queue);
1020  }
1021  av_frame_free(&ifp->sub2video.frame);
1022 
1023  av_frame_free(&ifp->frame);
1024  av_frame_free(&ifp->opts.fallback);
1025 
1027  av_freep(&ifilter->linklabel);
1028  av_freep(&ifp->opts.name);
1030  av_freep(&ifilter->name);
1031  av_freep(&ifilter->input_name);
1032  av_freep(&fg->inputs[j]);
1033  }
1034  av_freep(&fg->inputs);
1035  for (int j = 0; j < fg->nb_outputs; j++) {
1036  OutputFilter *ofilter = fg->outputs[j];
1037  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1038 
1039  av_frame_free(&ofp->fps.last_frame);
1040  av_dict_free(&ofp->sws_opts);
1041  av_dict_free(&ofp->swr_opts);
1042 
1043  av_freep(&ofilter->linklabel);
1044  av_freep(&ofilter->name);
1045  av_freep(&ofilter->output_name);
1046  av_freep(&ofilter->apad);
1049  av_freep(&fg->outputs[j]);
1050  }
1051  av_freep(&fg->outputs);
1052  av_freep(&fg->graph_desc);
1053 
1054  av_frame_free(&fgp->frame);
1055  av_frame_free(&fgp->frame_enc);
1056 
1057  av_freep(pfg);
1058 }
1059 
/* av_log item_name callback for FilterGraphPriv: returns the graph's
 * log name ("fc#N"). */
1060 static const char *fg_item_name(void *obj)
1061 {
1062  const FilterGraphPriv *fgp = obj;
1063 
1064  return fgp->log_name;
1065 }
1066 
/* AVClass for filtergraph logging. */
1067 static const AVClass fg_class = {
1068  .class_name = "FilterGraph",
1069  .version = LIBAVUTIL_VERSION_INT,
1070  .item_name = fg_item_name,
1071  .category = AV_CLASS_CATEGORY_FILTER,
1072 };
1073 
/* Create a filtergraph from a textual description. Takes ownership of
 * graph_desc (freed on early failure). When pfg is non-NULL the graph
 * is returned through it with index -1; otherwise it is registered in
 * the global filtergraphs list. A throwaway AVFilterGraph is parsed
 * only to enumerate the graph's dangling inputs/outputs, which become
 * InputFilter/OutputFilter entries; the graph is then registered with
 * the scheduler.
 * NOTE(review): extraction dropped orig lines 1079 (declarations —
 * presumably AVFilterInOut *inputs, *outputs used below), 1094 (the
 * call whose 'ret' is tested at 1095 — presumably appending to the
 * global filtergraphs list), 1106, 1125 (the trailing graph_parse
 * argument — presumably the hw device), and 1200-1201 (cleanup of
 * inputs/outputs before avfilter_graph_free). Also note the stray
 * double semicolon at orig 1121 (harmless). Recover the dropped lines
 * from the repository before editing. */
1074 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
1075 {
1076  FilterGraphPriv *fgp;
1077  FilterGraph *fg;
1078 
1080  AVFilterGraph *graph;
1081  int ret = 0;
1082 
1083  fgp = av_mallocz(sizeof(*fgp));
1084  if (!fgp) {
1085  av_freep(&graph_desc);
1086  return AVERROR(ENOMEM);
1087  }
1088  fg = &fgp->fg;
1089 
1090  if (pfg) {
1091  *pfg = fg;
1092  fg->index = -1;
1093  } else {
1095  if (ret < 0) {
1096  av_freep(&graph_desc);
1097  av_freep(&fgp);
1098  return ret;
1099  }
1100 
1101  fg->index = nb_filtergraphs - 1;
1102  }
1103 
1104  fg->class = &fg_class;
1105  fg->graph_desc = graph_desc;
1107  fgp->nb_threads = -1;
1108  fgp->sch = sch;
1109 
1110  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1111 
1112  fgp->frame = av_frame_alloc();
1113  fgp->frame_enc = av_frame_alloc();
1114  if (!fgp->frame || !fgp->frame_enc)
1115  return AVERROR(ENOMEM);
1116 
1117  /* this graph is only used for determining the kinds of inputs
1118  * and outputs we have, and is discarded on exit from this function */
1119  graph = avfilter_graph_alloc();
1120  if (!graph)
1121  return AVERROR(ENOMEM);;
1122  graph->nb_threads = 1;
1123 
1124  ret = graph_parse(fg, graph, fg->graph_desc, &inputs, &outputs,
1126  if (ret < 0)
1127  goto fail;
1128 
1129  for (unsigned i = 0; i < graph->nb_filters; i++) {
1130  const AVFilter *f = graph->filters[i]->filter;
1131  if ((!avfilter_filter_pad_count(f, 0) &&
1132  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1133  !strcmp(f->name, "apad")) {
1134  fgp->have_sources = 1;
1135  break;
1136  }
1137  }
1138 
1139  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1140  InputFilter *const ifilter = ifilter_alloc(fg);
1141 
1142  if (!ifilter) {
1143  ret = AVERROR(ENOMEM);
1144  goto fail;
1145  }
1146 
1147  ifilter->linklabel = cur->name;
1148  cur->name = NULL;
1149 
1150  ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1151  cur->pad_idx);
1152 
1153  if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) {
1154  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1155  "currently.\n");
1156  ret = AVERROR(ENOSYS);
1157  goto fail;
1158  }
1159 
1160  ifilter->name = describe_filter_link(fg, cur, 1);
1161  if (!ifilter->name) {
1162  ret = AVERROR(ENOMEM);
1163  goto fail;
1164  }
1165  }
1166 
1167  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1168  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1169  cur->pad_idx);
1170  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1171 
1172  if (!ofilter) {
1173  ret = AVERROR(ENOMEM);
1174  goto fail;
1175  }
1176 
1177  ofilter->linklabel = cur->name;
1178  cur->name = NULL;
1179 
1180  ofilter->name = describe_filter_link(fg, cur, 0);
1181  if (!ofilter->name) {
1182  ret = AVERROR(ENOMEM);
1183  goto fail;
1184  }
1185  }
1186 
1187  if (!fg->nb_outputs) {
1188  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1189  ret = AVERROR(ENOSYS);
1190  goto fail;
1191  }
1192 
1193  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1194  filter_thread, fgp);
1195  if (ret < 0)
1196  goto fail;
1197  fgp->sch_idx = ret;
1198 
1199 fail:
1202  avfilter_graph_free(&graph);
1203 
1204  if (ret < 0)
1205  return ret;
1206 
1207  return 0;
1208 }
1209 
/* Create and wire up a "simple" filtergraph (exactly one input, one
 * output): creates the graph from graph_desc, validates the 1-in/1-out
 * shape and output media type, binds the input to the given stream and
 * the output to the given encoder, and applies the thread-count option.
 * NOTE(review): the first signature line (orig 1210 — presumably
 * int fg_create_simple(FilterGraph **pfg, ...)) and the av_log
 * argument lines at orig 1243-1244 were lost in extraction. Recover
 * from the repository before editing. */
1211  InputStream *ist,
1212  char *graph_desc,
1213  Scheduler *sch, unsigned sched_idx_enc,
1214  const OutputFilterOptions *opts)
1215 {
1216  const enum AVMediaType type = ist->par->codec_type;
1217  FilterGraph *fg;
1218  FilterGraphPriv *fgp;
1219  int ret;
1220 
1221  ret = fg_create(pfg, graph_desc, sch);
1222  if (ret < 0)
1223  return ret;
1224  fg = *pfg;
1225  fgp = fgp_from_fg(fg);
1226 
1227  fgp->is_simple = 1;
1228 
1229  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1230  av_get_media_type_string(type)[0], opts->name);
1231 
1232  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1233  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1234  "to have exactly 1 input and 1 output. "
1235  "However, it had %d input(s) and %d output(s). Please adjust, "
1236  "or use a complex filtergraph (-filter_complex) instead.\n",
1237  graph_desc, fg->nb_inputs, fg->nb_outputs);
1238  return AVERROR(EINVAL);
1239  }
1240  if (fg->outputs[0]->type != type) {
1241  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1242  "it to %s output stream\n",
1245  return AVERROR(EINVAL);
1246  }
1247 
1248  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1249  if (ret < 0)
1250  return ret;
1251 
1252  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1253  if (ret < 0)
1254  return ret;
1255 
1256  if (opts->nb_threads >= 0)
1257  fgp->nb_threads = opts->nb_threads;
1258 
1259  return 0;
1260 }
1261 
/* Bind one input of a complex filtergraph. Three cases, in order:
 *  1. link label "dec:N"  -> bind to standalone decoder N;
 *  2. any other link label -> first try an unbound output of another
 *     filtergraph with the same label, else parse it as
 *     "FILE_IDX[:STREAM_SPEC]" and bind to that demuxer stream;
 *  3. no label -> bind to any yet-unused input stream of the same type
 *     (subtitle streams also match video inputs, for sub2video).
 * NOTE(review): this extract is missing the signature line and some local
 * declarations (the `vs`/`ss` specifier locals used below) — confirm
 * against the complete file; the name fg_complex_bind_input is taken from
 * the call site in bind_inputs(). */
1263 {
1264  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1265  InputStream *ist = NULL;
1266  enum AVMediaType type = ifilter->type;
1268  const char *spec;
1269  char *p;
1270  int i, ret;
1271 
1272  if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) {
1273  // bind to a standalone decoder
1274  int dec_idx;
1275 
1276  dec_idx = strtol(ifilter->linklabel + 4, &p, 0);
1277  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1278  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1279  dec_idx, fg->graph_desc);
1280  return AVERROR(EINVAL);
1281  }
1282 
/* An optional view specifier (multiview video) may follow "dec:N:". */
1283  if (type == AVMEDIA_TYPE_VIDEO) {
1284  spec = *p == ':' ? p + 1 : p;
1285  ret = view_specifier_parse(&spec, &vs);
1286  if (ret < 0)
1287  return ret;
1288  }
1289 
1290  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1291  if (ret < 0)
1292  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1293  ifilter->name);
1294  return ret;
1295  } else if (ifilter->linklabel) {
1297  AVFormatContext *s;
1298  AVStream *st = NULL;
1299  int file_idx;
1300 
1301  // try finding an unbound filtergraph output with this label
1302  for (int i = 0; i < nb_filtergraphs; i++) {
1303  FilterGraph *fg_src = filtergraphs[i];
1304 
1305  if (fg == fg_src)
1306  continue;
1307 
1308  for (int j = 0; j < fg_src->nb_outputs; j++) {
1309  OutputFilter *ofilter = fg_src->outputs[j];
1310 
1311  if (!ofilter->bound && ofilter->linklabel &&
1312  !strcmp(ofilter->linklabel, ifilter->linklabel)) {
1313  av_log(fg, AV_LOG_VERBOSE,
1314  "Binding input with label '%s' to filtergraph output %d:%d\n",
1315  ifilter->linklabel, i, j);
1316 
1317  ret = ifilter_bind_fg(ifp, fg_src, j);
1318  if (ret < 0)
1319  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1320  ifilter->linklabel);
1321  return ret;
1322  }
1323  }
1324  }
1325 
1326  // bind to an explicitly specified demuxer stream
1327  file_idx = strtol(ifilter->linklabel, &p, 0);
1328  if (file_idx < 0 || file_idx >= nb_input_files) {
1329  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1330  file_idx, fg->graph_desc);
1331  return AVERROR(EINVAL);
1332  }
1333  s = input_files[file_idx]->ctx;
1334 
1335  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1336  if (ret < 0) {
1337  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1338  return ret;
1339  }
1340 
1341  if (type == AVMEDIA_TYPE_VIDEO) {
1342  spec = ss.remainder ? ss.remainder : "";
1343  ret = view_specifier_parse(&spec, &vs);
1344  if (ret < 0) {
1346  return ret;
1347  }
1348  }
1349 
/* Pick the first stream in the file that matches both the media type
 * and the stream specifier. */
1350  for (i = 0; i < s->nb_streams; i++) {
1351  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1352  if (stream_type != type &&
1353  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1354  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1355  continue;
1356  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1357  st = s->streams[i];
1358  break;
1359  }
1360  }
1362  if (!st) {
1363  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1364  "matches no streams.\n", p, fg->graph_desc);
1365  return AVERROR(EINVAL);
1366  }
1367  ist = input_files[file_idx]->streams[st->index];
1368 
1369  av_log(fg, AV_LOG_VERBOSE,
1370  "Binding input with label '%s' to input stream %d:%d\n",
1371  ifilter->linklabel, ist->file->index, ist->index);
1372  } else {
1373  ist = ist_find_unused(type);
1374  if (!ist) {
1375  av_log(fg, AV_LOG_FATAL,
1376  "Cannot find an unused %s input stream to feed the "
1377  "unlabeled input pad %s.\n",
1378  av_get_media_type_string(type), ifilter->name);
1379  return AVERROR(EINVAL);
1380  }
1381 
1382  av_log(fg, AV_LOG_VERBOSE,
1383  "Binding unlabeled input %d to input stream %d:%d\n",
1384  ifilter->index, ist->file->index, ist->index);
1385  }
1386  av_assert0(ist);
1387 
1388  ret = ifilter_bind_ist(ifilter, ist, &vs);
1389  if (ret < 0) {
1390  av_log(fg, AV_LOG_ERROR,
1391  "Error binding an input stream to complex filtergraph input %s.\n",
1392  ifilter->name);
1393  return ret;
1394  }
1395 
1396  return 0;
1397 }
1398 
/* Bind every not-yet-bound input of a filtergraph (see
 * fg_complex_bind_input() for the binding rules).
 * Returns 0 on success, a negative AVERROR on the first failure.
 * NOTE(review): the line declaring `ifp` (presumably
 * ifp_from_ifilter(fg->inputs[i])) is missing from this extract —
 * confirm against the complete file. */
1399 static int bind_inputs(FilterGraph *fg)
1400 {
1401  // bind filtergraph inputs to input streams or other filtergraphs
1402  for (int i = 0; i < fg->nb_inputs; i++) {
1404  int ret;
1405 
1406  if (ifp->bound)
1407  continue;
1408 
1409  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1410  if (ret < 0)
1411  return ret;
1412  }
1413 
1414  return 0;
1415 }
1416 
/* Finalise bindings for all filtergraphs: bind every graph's inputs, then
 * verify that every filtergraph output was connected to something.
 * NOTE(review): this extract is missing the signature line and the call
 * that assigns `ret` in the first loop (presumably
 * bind_inputs(filtergraphs[i])) — confirm against the complete file. */
1418 {
1419  int ret;
1420 
1421  for (int i = 0; i < nb_filtergraphs; i++) {
1423  if (ret < 0)
1424  return ret;
1425  }
1426 
1427  // check that all outputs were bound
1428  for (int i = 0; i < nb_filtergraphs; i++) {
1429  FilterGraph *fg = filtergraphs[i];
1430 
1431  for (int j = 0; j < fg->nb_outputs; j++) {
1432  OutputFilter *output = fg->outputs[j];
1433  if (!output->bound) {
1434  av_log(fg, AV_LOG_FATAL,
1435  "Filter '%s' has output %d (%s) unconnected\n",
1436  output->name, j,
1437  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1438  return AVERROR(EINVAL);
1439  }
1440  }
1441  }
1442 
1443  return 0;
1444 }
1445 
/* Append a trim (video) or atrim (audio) filter after *last_filter/*pad_idx
 * to enforce an output start time and/or duration. No-op when neither limit
 * is set. On success, *last_filter/*pad_idx are updated to the trim filter's
 * output so the caller can keep chaining.
 * NOTE(review): this extract is missing the `ctx` declaration, the
 * AV_OPT_SEARCH_CHILDREN flag arguments of the av_opt_set_int() calls and
 * the filter-init call before the first `if (ret < 0)` after them —
 * confirm against the complete file. */
1446 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1447  AVFilterContext **last_filter, int *pad_idx,
1448  const char *filter_name)
1449 {
1450  AVFilterGraph *graph = (*last_filter)->graph;
1452  const AVFilter *trim;
1453  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1454  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1455  int ret = 0;
1456 
/* Nothing to limit: leave the chain untouched. */
1457  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1458  return 0;
1459 
1460  trim = avfilter_get_by_name(name);
1461  if (!trim) {
1462  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1463  "recording time.\n", name);
1464  return AVERROR_FILTER_NOT_FOUND;
1465  }
1466 
1467  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1468  if (!ctx)
1469  return AVERROR(ENOMEM);
1470 
/* "durationi"/"starti" are the integer (microsecond) forms of the
 * (a)trim options. */
1471  if (duration != INT64_MAX) {
1472  ret = av_opt_set_int(ctx, "durationi", duration,
1474  }
1475  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1476  ret = av_opt_set_int(ctx, "starti", start_time,
1478  }
1479  if (ret < 0) {
1480  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1481  return ret;
1482  }
1483 
1485  if (ret < 0)
1486  return ret;
1487 
1488  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1489  if (ret < 0)
1490  return ret;
1491 
1492  *last_filter = ctx;
1493  *pad_idx = 0;
1494  return 0;
1495 }
1496 
/* Create the named filter with the given args, link it after
 * *last_filter/*pad_idx and advance those to the new filter's output.
 * Returns AVERROR_BUG when the filter name is unknown (callers only pass
 * built-in names).
 * NOTE(review): this extract is missing the `ctx` declaration and the
 * beginning of the avfilter_graph_create_filter(&ctx, ...) call — confirm
 * against the complete file. */
1497 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1498  const char *filter_name, const char *args)
1499 {
1500  AVFilterGraph *graph = (*last_filter)->graph;
1501  const AVFilter *filter = avfilter_get_by_name(filter_name);
1503  int ret;
1504 
1505  if (!filter)
1506  return AVERROR_BUG;
1507 
1509  filter,
1510  filter_name, args, NULL, graph);
1511  if (ret < 0)
1512  return ret;
1513 
1514  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1515  if (ret < 0)
1516  return ret;
1517 
1518  *last_filter = ctx;
1519  *pad_idx = 0;
1520  return 0;
1521 }
1522 
/* Configure one video output of the graph: create a buffersink, optionally
 * insert an auto-scaler and a format-constraining filter, then an output
 * trim, and finally link the chain to ofilter->filter.
 * NOTE(review): this extract is missing the first signature line and
 * several avfilter_graph_create_filter()/bprint-init lines — confirm
 * against the complete file. */
1524  OutputFilter *ofilter, AVFilterInOut *out)
1525 {
1526  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1527  AVFilterContext *last_filter = out->filter_ctx;
1528  AVBPrint bprint;
1529  int pad_idx = out->pad_idx;
1530  int ret;
1531  char name[255];
1532 
1533  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1535  avfilter_get_by_name("buffersink"),
1536  name, NULL, NULL, graph);
1537 
1538  if (ret < 0)
1539  return ret;
1540 
/* Insert a scaler when a fixed output size was requested and autoscaling
 * is enabled; sws options are appended as extra scaler args. */
1541  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1542  char args[255];
1544  const AVDictionaryEntry *e = NULL;
1545 
1546  snprintf(args, sizeof(args), "%d:%d",
1547  ofp->width, ofp->height);
1548 
1549  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1550  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1551  }
1552 
1553  snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name);
1555  name, args, NULL, graph)) < 0)
1556  return ret;
1557  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1558  return ret;
1559 
1560  last_filter = filter;
1561  pad_idx = 0;
1562  }
1563 
/* Collect pixel format / colorspace / range / alpha constraints into a
 * "format" filter argument string. */
1565  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1567  choose_pix_fmts(ofp, &bprint);
1568  choose_color_spaces(ofp, &bprint);
1569  choose_color_ranges(ofp, &bprint);
1570  choose_alpha_modes(ofp, &bprint);
1571  if (!av_bprint_is_complete(&bprint))
1572  return AVERROR(ENOMEM);
1573 
1574  if (bprint.len) {
1576 
1578  avfilter_get_by_name("format"),
1579  "format", bprint.str, NULL, graph);
1580  av_bprint_finalize(&bprint, NULL);
1581  if (ret < 0)
1582  return ret;
1583  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1584  return ret;
1585 
1586  last_filter = filter;
1587  pad_idx = 0;
1588  }
1589 
1590  snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name);
1591  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1592  &last_filter, &pad_idx, name);
1593  if (ret < 0)
1594  return ret;
1595 
1596 
1597  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1598  return ret;
1599 
1600  return 0;
1601 }
1602 
/* Configure one audio output of the graph: create an abuffersink, insert
 * an aformat filter for sample format/rate/layout constraints, optionally
 * an apad filter (-apad), then an output trim, and link the chain to
 * ofilter->filter. All error paths go through `fail` to finalize `args`.
 * NOTE(review): this extract is missing the first signature line and some
 * create-filter/bprint-init lines — confirm against the complete file. */
1604  OutputFilter *ofilter, AVFilterInOut *out)
1605 {
1606  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1607  AVFilterContext *last_filter = out->filter_ctx;
1608  int pad_idx = out->pad_idx;
1609  AVBPrint args;
1610  char name[255];
1611  int ret;
1612 
1613  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1615  avfilter_get_by_name("abuffersink"),
1616  name, NULL, NULL, graph);
1617  if (ret < 0)
1618  return ret;
1619 
/* Helper: create filter_name=arg, link it after last_filter and make it
 * the new chain tail; jumps to `fail` on error. */
1620 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1621  AVFilterContext *filt_ctx; \
1622  \
1623  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1624  "similarly to -af " filter_name "=%s.\n", arg); \
1625  \
1626  ret = avfilter_graph_create_filter(&filt_ctx, \
1627  avfilter_get_by_name(filter_name), \
1628  filter_name, arg, NULL, graph); \
1629  if (ret < 0) \
1630  goto fail; \
1631  \
1632  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1633  if (ret < 0) \
1634  goto fail; \
1635  \
1636  last_filter = filt_ctx; \
1637  pad_idx = 0; \
1638 } while (0)
1640 
1641  choose_sample_fmts(ofp, &args);
1642  choose_sample_rates(ofp, &args);
1643  choose_channel_layouts(ofp, &args);
1644  if (!av_bprint_is_complete(&args)) {
1645  ret = AVERROR(ENOMEM);
1646  goto fail;
1647  }
1648  if (args.len) {
1650 
1651  snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name);
1653  avfilter_get_by_name("aformat"),
1654  name, args.str, NULL, graph);
1655  if (ret < 0)
1656  goto fail;
1657 
1658  ret = avfilter_link(last_filter, pad_idx, format, 0);
1659  if (ret < 0)
1660  goto fail;
1661 
1662  last_filter = format;
1663  pad_idx = 0;
1664  }
1665 
/* apad keeps producing silence, so the graph gains a data source. */
1666  if (ofilter->apad) {
1667  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1668  fgp->have_sources = 1;
1669  }
1670 
1671  snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name);
1672  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1673  &last_filter, &pad_idx, name);
1674  if (ret < 0)
1675  goto fail;
1676 
1677  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1678  goto fail;
1679 fail:
1680  av_bprint_finalize(&args, NULL);
1681 
1682  return ret;
1683 }
1684 
/* Dispatch output configuration by media type (video or audio only).
 * NOTE(review): the first signature line is missing from this extract. */
1686  OutputFilter *ofilter, AVFilterInOut *out)
1687 {
1688  switch (ofilter->type) {
1689  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1690  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1691  default: av_assert0(0); return 0;
1692  }
1693 }
1694 
/* Reset the sub2video state so the next heartbeat (re-)initializes it;
 * pts trackers are set to INT64_MIN ("nothing seen yet").
 * NOTE(review): the signature line is missing from this extract. */
1696 {
1697  ifp->sub2video.last_pts = INT64_MIN;
1698  ifp->sub2video.end_pts = INT64_MIN;
1699 
1700  /* sub2video structure has been (re-)initialized.
1701  Mark it as such so that the system will be
1702  initialized with the first received heartbeat. */
1703  ifp->sub2video.initialize = 1;
1704 }
1705 
/* Configure one video input of the graph: create a buffer source with the
 * stream's parameters, then insert optional crop, autorotate (from the
 * display matrix) and input-trim filters before linking into the parsed
 * graph at in->filter_ctx/in->pad_idx.
 * NOTE(review): this extract is missing the first signature line, the
 * allocation of `par` (av_buffersrc_parameters_alloc) and the line
 * assigning `desc` — confirm against the complete file. */
1707  InputFilter *ifilter, AVFilterInOut *in)
1708 {
1709  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1710 
1711  AVFilterContext *last_filter;
1712  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1713  const AVPixFmtDescriptor *desc;
1714  char name[255];
1715  int ret, pad_idx = 0;
1717  if (!par)
1718  return AVERROR(ENOMEM);
1719 
1720  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1721  sub2video_prepare(ifp);
1722 
1723  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1724  ifp->opts.name);
1725 
1726  ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1727  if (!ifilter->filter) {
1728  ret = AVERROR(ENOMEM);
1729  goto fail;
1730  }
1731 
/* Describe the incoming stream to the buffer source. */
1732  par->format = ifp->format;
1733  par->time_base = ifp->time_base;
1734  par->frame_rate = ifp->opts.framerate;
1735  par->width = ifp->width;
1736  par->height = ifp->height;
1737  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1738  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1739  par->color_space = ifp->color_space;
1740  par->color_range = ifp->color_range;
1741  par->alpha_mode = ifp->alpha_mode;
1742  par->hw_frames_ctx = ifp->hw_frames_ctx;
1743  par->side_data = ifp->side_data;
1744  par->nb_side_data = ifp->nb_side_data;
1745 
1746  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1747  if (ret < 0)
1748  goto fail;
1749  av_freep(&par);
1750 
1751  ret = avfilter_init_dict(ifilter->filter, NULL);
1752  if (ret < 0)
1753  goto fail;
1754 
1755  last_filter = ifilter->filter;
1756 
1758  av_assert0(desc);
1759 
1760  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1761  char crop_buf[64];
1762  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1763  ifp->opts.crop_left, ifp->opts.crop_right,
1764  ifp->opts.crop_top, ifp->opts.crop_bottom,
1765  ifp->opts.crop_left, ifp->opts.crop_top);
1766  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1767  if (ret < 0)
1768  return ret;
1769  }
1770 
1771  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1772  ifp->displaymatrix_applied = 0;
/* Autorotation from the stream display matrix: only for software pixel
 * formats; rotation is undone with transpose/hflip/vflip/rotate depending
 * on the angle. */
1773  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1774  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1775  int32_t *displaymatrix = ifp->displaymatrix;
1776  double theta;
1777 
1778  theta = get_rotation(displaymatrix);
1779 
1780  if (fabs(theta - 90) < 1.0) {
1781  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1782  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1783  } else if (fabs(theta - 180) < 1.0) {
1784  if (displaymatrix[0] < 0) {
1785  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1786  if (ret < 0)
1787  return ret;
1788  }
1789  if (displaymatrix[4] < 0) {
1790  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1791  }
1792  } else if (fabs(theta - 270) < 1.0) {
1793  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1794  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1795  } else if (fabs(theta) > 1.0) {
1796  char rotate_buf[64];
1797  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1798  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1799  } else if (fabs(theta) < 1.0) {
1800  if (displaymatrix && displaymatrix[4] < 0) {
1801  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1802  }
1803  }
1804  if (ret < 0)
1805  return ret;
1806 
1807  ifp->displaymatrix_applied = 1;
1808  }
1809 
1810  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1811  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1812  &last_filter, &pad_idx, name);
1813  if (ret < 0)
1814  return ret;
1815 
1816  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1817  return ret;
1818  return 0;
1819 fail:
1820  av_freep(&par);
1821 
1822  return ret;
1823 }
1824 
/* Configure one audio input of the graph: create an abuffer source from a
 * textual argument string (time base, sample rate/format, channel layout
 * or count), attach side data via buffer-source parameters, insert an
 * input trim and link into the parsed graph.
 * NOTE(review): this extract is missing the first signature line, the
 * av_bprint_init of `args`, the channel-layout describe call and the
 * `par` allocation — confirm against the complete file. */
1826  InputFilter *ifilter, AVFilterInOut *in)
1827 {
1828  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1829  AVFilterContext *last_filter;
1830  AVBufferSrcParameters *par;
1831  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1832  AVBPrint args;
1833  char name[255];
1834  int ret, pad_idx = 0;
1835 
1837  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1838  ifp->time_base.num, ifp->time_base.den,
1839  ifp->sample_rate,
1841  if (av_channel_layout_check(&ifp->ch_layout) &&
1843  av_bprintf(&args, ":channel_layout=");
1845  } else
1846  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1847  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1848 
1849  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
1850  name, args.str, NULL,
1851  graph)) < 0)
1852  return ret;
1854  if (!par)
1855  return AVERROR(ENOMEM);
1856  par->side_data = ifp->side_data;
1857  par->nb_side_data = ifp->nb_side_data;
1858  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1859  av_free(par);
1860  if (ret < 0)
1861  return ret;
1862  last_filter = ifilter->filter;
1863 
1864  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1865  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1866  &last_filter, &pad_idx, name);
1867  if (ret < 0)
1868  return ret;
1869 
1870  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1871  return ret;
1872 
1873  return 0;
1874 }
1875 
/* Dispatch input configuration by media type (video or audio only).
 * NOTE(review): the first signature line is missing from this extract. */
1877  InputFilter *ifilter, AVFilterInOut *in)
1878 {
1879  switch (ifilter->type) {
1880  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1881  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1882  default: av_assert0(0); return 0;
1883  }
1884 }
1885 
/* Tear down the configured graph: clear the per-input/output filter
 * context pointers (owned by the graph) before freeing the graph itself.
 * NOTE(review): the signature line is missing from this extract. */
1887 {
1888  for (int i = 0; i < fg->nb_outputs; i++)
1889  fg->outputs[i]->filter = NULL;
1890  for (int i = 0; i < fg->nb_inputs; i++)
1891  fg->inputs[i]->filter = NULL;
1892  avfilter_graph_free(&fgt->graph);
1893 }
1894 
/* True if the filter context is a source buffer ("buffer"/"abuffer" with
 * no inputs).
 * NOTE(review): the signature line is missing from this extract. */
1896 {
1897  return f->nb_inputs == 0 &&
1898  (!strcmp(f->filter->name, "buffer") ||
1899  !strcmp(f->filter->name, "abuffer"));
1900 }
1901 
/* Return 1 if every filter in the graph is metadata-only (or a sink /
 * buffer source), i.e. the graph never modifies frame data.
 * NOTE(review): the last operand of the condition below (presumably
 * filter_is_buffersrc(f)) is missing from this extract — confirm against
 * the complete file. */
1902 static int graph_is_meta(AVFilterGraph *graph)
1903 {
1904  for (unsigned i = 0; i < graph->nb_filters; i++) {
1905  const AVFilterContext *f = graph->filters[i];
1906 
1907  /* in addition to filters flagged as meta, also
1908  * disregard sinks and buffersources (but not other sources,
1909  * since they introduce data we are not aware of)
1910  */
1911  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1912  f->nb_outputs == 0 ||
1914  return 0;
1915  }
1916  return 1;
1917 }
1918 
1919 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1920 
/* (Re)configure a filtergraph: allocate a fresh AVFilterGraph, apply
 * thread/sws/swr options (simple graphs only), parse the description,
 * configure all inputs and outputs, read back the negotiated output
 * parameters, then replay queued input frames and send EOF for inputs
 * that already finished.
 * NOTE(review): this extract is missing the signature line and numerous
 * interior lines (error logging, buffersink getters, side-data copy
 * calls, the non-simple swr-opts branch) — confirm against the complete
 * file before editing logic. */
1922 {
1923  FilterGraphPriv *fgp = fgp_from_fg(fg);
1924  AVBufferRef *hw_device;
1925  AVFilterInOut *inputs, *outputs, *cur;
1926  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
1927  int have_input_eof = 0;
1928  const char *graph_desc = fg->graph_desc;
1929 
1930  cleanup_filtergraph(fg, fgt);
1931  fgt->graph = avfilter_graph_alloc();
1932  if (!fgt->graph)
1933  return AVERROR(ENOMEM);
1934 
/* Simple (1-in/1-out) graphs take per-output thread and scaler/resampler
 * options from the output filter. */
1935  if (simple) {
1936  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1937 
1938  if (filter_nbthreads) {
1939  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1940  if (ret < 0)
1941  goto fail;
1942  } else if (fgp->nb_threads >= 0) {
1943  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
1944  if (ret < 0)
1945  return ret;
1946  }
1947 
1948  if (av_dict_count(ofp->sws_opts)) {
1950  &fgt->graph->scale_sws_opts,
1951  '=', ':');
1952  if (ret < 0)
1953  goto fail;
1954  }
1955 
1956  if (av_dict_count(ofp->swr_opts)) {
1957  char *args;
1958  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1959  if (ret < 0)
1960  goto fail;
1961  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1962  av_free(args);
1963  }
1964  } else {
1966  }
1967 
1968  if (filter_buffered_frames) {
1969  ret = av_opt_set_int(fgt->graph, "max_buffered_frames", filter_buffered_frames, 0);
1970  if (ret < 0)
1971  return ret;
1972  }
1973 
1974  hw_device = hw_device_for_filter();
1975 
1976  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
1977  if (ret < 0)
1978  goto fail;
1979 
1980  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1981  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1984  goto fail;
1985  }
1987 
1988  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1989  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1990  if (ret < 0) {
1992  goto fail;
1993  }
1994  }
1996 
1997  if (fgp->disable_conversions)
1999  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
2000  goto fail;
2001 
2002  fgp->is_meta = graph_is_meta(fgt->graph);
2003 
2004  /* limit the lists of allowed formats to the ones selected, to
2005  * make sure they stay the same if the filtergraph is reconfigured later */
2006  for (int i = 0; i < fg->nb_outputs; i++) {
2007  const AVFrameSideData *const *sd;
2008  int nb_sd;
2009  OutputFilter *ofilter = fg->outputs[i];
2010  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2011  AVFilterContext *sink = ofilter->filter;
2012 
2013  ofp->format = av_buffersink_get_format(sink);
2014 
2015  ofp->width = av_buffersink_get_w(sink);
2016  ofp->height = av_buffersink_get_h(sink);
2020 
2021  // If the timing parameters are not locked yet, get the tentative values
2022  // here but don't lock them. They will only be used if no output frames
2023  // are ever produced.
2024  if (!ofp->tb_out_locked) {
2026  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2027  fr.num > 0 && fr.den > 0)
2028  ofp->fps.framerate = fr;
2029  ofp->tb_out = av_buffersink_get_time_base(sink);
2030  }
2032 
2035  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2036  if (ret < 0)
2037  goto fail;
2039  sd = av_buffersink_get_side_data(sink, &nb_sd);
2040  if (nb_sd)
2041  for (int j = 0; j < nb_sd; j++) {
2043  sd[j], 0);
2044  if (ret < 0) {
2046  goto fail;
2047  }
2048  }
2049  }
2050 
/* Replay frames that were queued while the graph was unconfigured. */
2051  for (int i = 0; i < fg->nb_inputs; i++) {
2052  InputFilter *ifilter = fg->inputs[i];
2054  AVFrame *tmp;
2055  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2056  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2057  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2058  } else {
2059  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2060  if (ifp->displaymatrix_applied)
2062  }
2063  ret = av_buffersrc_add_frame(ifilter->filter, tmp);
2064  }
2065  av_frame_free(&tmp);
2066  if (ret < 0)
2067  goto fail;
2068  }
2069  }
2070 
2071  /* send the EOFs for the finished inputs */
2072  for (int i = 0; i < fg->nb_inputs; i++) {
2073  InputFilter *ifilter = fg->inputs[i];
2074  if (fgt->eof_in[i]) {
2075  ret = av_buffersrc_add_frame(ifilter->filter, NULL);
2076  if (ret < 0)
2077  goto fail;
2078  have_input_eof = 1;
2079  }
2080  }
2081 
2082  if (have_input_eof) {
2083  // make sure the EOF propagates to the end of the graph
2085  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2086  goto fail;
2087  }
2088 
2089  return 0;
2090 fail:
2091  cleanup_filtergraph(fg, fgt);
2092  return ret;
2093 }
2094 
/* Record the input parameters (format, dimensions, timing, layout, global
 * side data, display matrix, downmix info) from an incoming frame into the
 * InputFilterPriv, for use when (re)configuring the buffer source.
 * NOTE(review): this extract is missing the signature line and the heads
 * of several calls (side-data free/clone, av_frame_get_side_data for
 * displaymatrix/downmix) — confirm against the complete file. */
2096 {
2097  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2098  AVFrameSideData *sd;
2099  int ret;
2100 
2101  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2102  if (ret < 0)
2103  return ret;
2104 
/* Audio uses 1/sample_rate; CFR video uses the inverse framerate;
 * otherwise take the frame's own time base. */
2105  ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2106  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2107  frame->time_base;
2108 
2109  ifp->format = frame->format;
2110 
2111  ifp->width = frame->width;
2112  ifp->height = frame->height;
2113  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2114  ifp->color_space = frame->colorspace;
2115  ifp->color_range = frame->color_range;
2116  ifp->alpha_mode = frame->alpha_mode;
2117 
2118  ifp->sample_rate = frame->sample_rate;
2119  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2120  if (ret < 0)
2121  return ret;
2122 
/* Keep only side data flagged as global (stream-level, not per-frame). */
2124  for (int i = 0; i < frame->nb_side_data; i++) {
2125  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2126 
2127  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL))
2128  continue;
2129 
2131  &ifp->nb_side_data,
2132  frame->side_data[i], 0);
2133  if (ret < 0)
2134  return ret;
2135  }
2136 
2138  if (sd)
2139  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2140  ifp->displaymatrix_present = !!sd;
2141 
2142  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2143  * to the filter chain even though it's not "global", as filters like aresample
2144  * require this information during init and not when remixing a frame */
2146  if (sd) {
2148  &ifp->nb_side_data, sd, 0);
2149  if (ret < 0)
2150  return ret;
2151  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2152  }
2153  ifp->downmixinfo_present = !!sd;
2154 
2155  return 0;
2156 }
2157 
/* Accessor: whether this graph is a "simple" (1 input, 1 output,
 * -vf/-af style) filtergraph.
 * NOTE(review): the signature line is missing from this extract. */
2159 {
2160  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2161  return fgp->is_simple;
2162 }
2163 
2164 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2165  double time, const char *target,
2166  const char *command, const char *arg, int all_filters)
2167 {
2168  int ret;
2169 
2170  if (!graph)
2171  return;
2172 
2173  if (time < 0) {
2174  char response[4096];
2175  ret = avfilter_graph_send_command(graph, target, command, arg,
2176  response, sizeof(response),
2177  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2178  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2179  fg->index, ret, response);
2180  } else if (!all_filters) {
2181  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2182  } else {
2183  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2184  if (ret < 0)
2185  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2186  }
2187 }
2188 
2189 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2190 {
2191  int nb_requests, nb_requests_max = -1;
2192  int best_input = -1;
2193 
2194  for (int i = 0; i < fg->nb_inputs; i++) {
2195  InputFilter *ifilter = fg->inputs[i];
2196 
2197  if (fgt->eof_in[i])
2198  continue;
2199 
2200  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2201  if (nb_requests > nb_requests_max) {
2202  nb_requests_max = nb_requests;
2203  best_input = i;
2204  }
2205  }
2206 
2207  av_assert0(best_input >= 0);
2208 
2209  return best_input;
2210 }
2211 
/* Decide and lock the output time base (and target framerate for video)
 * for an output filter, based on -enc_time_base, the buffersink-reported
 * framerate, vsync mode and any encoder-supported framerate list.
 * NOTE(review): the signature line is missing from this extract
 * (presumably choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)). */
2213 {
2214  OutputFilter *ofilter = &ofp->ofilter;
2215  FPSConvContext *fps = &ofp->fps;
2216  AVRational tb = (AVRational){ 0, 0 };
2217  AVRational fr;
2218  const FrameData *fd;
2219 
2220  fd = frame_data_c(frame);
2221 
2222  // apply -enc_time_base
2223  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2224  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2225  av_log(ofp, AV_LOG_ERROR,
2226  "Demuxing timebase not available - cannot use it for encoding\n");
2227  return AVERROR(EINVAL);
2228  }
2229 
2230  switch (ofp->enc_timebase.num) {
2231  case 0: break;
2232  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2233  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2234  default: tb = ofp->enc_timebase; break;
2235  }
2236 
/* Audio defaults to 1/sample_rate; no framerate logic applies. */
2237  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2238  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2239  goto finish;
2240  }
2241 
2242  fr = fps->framerate;
2243  if (!fr.num) {
2244  AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter);
2245  if (fr_sink.num > 0 && fr_sink.den > 0)
2246  fr = fr_sink;
2247  }
2248 
2249  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2250  if (!fr.num && !fps->framerate_max.num) {
2251  fr = (AVRational){25, 1};
2252  av_log(ofp, AV_LOG_WARNING,
2253  "No information "
2254  "about the input framerate is available. Falling "
2255  "back to a default value of 25fps. Use the -r option "
2256  "if you want a different framerate.\n");
2257  }
2258 
2259  if (fps->framerate_max.num &&
2260  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2261  !fr.den))
2262  fr = fps->framerate_max;
2263  }
2264 
/* Snap to the nearest encoder-supported framerate, then clip the
 * fraction's magnitude if requested. */
2265  if (fr.num > 0) {
2266  if (fps->framerate_supported) {
2267  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2268  fr = fps->framerate_supported[idx];
2269  }
2270  if (fps->framerate_clip) {
2271  av_reduce(&fr.num, &fr.den,
2272  fr.num, fr.den, fps->framerate_clip);
2273  }
2274  }
2275 
/* Fall back: inverse framerate, then the frame's own time base. */
2276  if (!(tb.num > 0 && tb.den > 0))
2277  tb = av_inv_q(fr);
2278  if (!(tb.num > 0 && tb.den > 0))
2279  tb = frame->time_base;
2280 
2281  fps->framerate = fr;
2282 finish:
2283  ofp->tb_out = tb;
2284  ofp->tb_out_locked = 1;
2285 
2286  return 0;
2287 }
2288 
/* Rescale frame->pts from the filter time base into the encoder time base
 * tb_dst (updating frame->time_base), and return the pts as a double with
 * extra fractional precision for the fps conversion code.
 * NOTE(review): the second operands of both av_rescale_q subtractions
 * (presumably the rescaled start_time offset) are missing from this
 * extract — confirm against the complete file. */
2289 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2290  AVRational tb_dst, int64_t start_time)
2291 {
2292  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2293 
2294  AVRational tb = tb_dst;
2295  AVRational filter_tb = frame->time_base;
/* Extra sub-tick precision bits, capped so the shifted den fits. */
2296  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2297 
2298  if (frame->pts == AV_NOPTS_VALUE)
2299  goto early_exit;
2300 
2301  tb.den <<= extra_bits;
2302  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2304  float_pts /= 1 << extra_bits;
2305  // when float_pts is not exactly an integer,
2306  // avoid exact midpoints to reduce the chance of rounding differences, this
2307  // can be removed in case the fps code is changed to work with integers
2308  if (float_pts != llrint(float_pts))
2309  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2310 
2311  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2313  frame->time_base = tb_dst;
2314 
2315 early_exit:
2316 
2317  if (debug_ts) {
2318  av_log(logctx, AV_LOG_INFO,
2319  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2320  frame ? av_ts2str(frame->pts) : "NULL",
2321  av_ts2timestr(frame->pts, &tb_dst),
2322  float_pts, tb_dst.num, tb_dst.den);
2323  }
2324 
2325  return float_pts;
2326 }
2327 
2328 /* Convert frame timestamps to the encoder timebase and decide how many times
2329  * should this (and possibly previous) frame be repeated in order to conform to
2330  * desired target framerate (if any).
2331  */
/* Framerate conversion core: given the next filtered frame (or NULL at
 * EOF), decide how many times to emit this frame (*nb_frames) and how
 * many times to repeat the previous one (*nb_frames_prev), according to
 * the vsync method; also maintains dup/drop statistics.
 * NOTE(review): this extract is missing the first signature line
 * (presumably video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,)
 * and the `#if`/condition lines preceding the VSYNC_DROP check — confirm
 * against the complete file. */
2333  int64_t *nb_frames, int64_t *nb_frames_prev)
2334 {
2335  OutputFilter *ofilter = &ofp->ofilter;
2336  FPSConvContext *fps = &ofp->fps;
2337  double delta0, delta, sync_ipts, duration;
2338 
/* EOF: extrapolate from the recent history of prev-frame repeats. */
2339  if (!frame) {
2340  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2341  fps->frames_prev_hist[1],
2342  fps->frames_prev_hist[2]);
2343 
2344  if (!*nb_frames && fps->last_dropped) {
2345  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2346  fps->last_dropped++;
2347  }
2348 
2349  goto finish;
2350  }
2351 
2352  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2353 
2354  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2355  ofp->tb_out, ofp->ts_offset);
2356  /* delta0 is the "drift" between the input frame and
2357  * where it would fall in the output. */
2358  delta0 = sync_ipts - ofp->next_pts;
2359  delta = delta0 + duration;
2360 
2361  // tracks the number of times the PREVIOUS frame should be duplicated,
2362  // mostly for variable framerate (VFR)
2363  *nb_frames_prev = 0;
2364  /* by default, we output a single frame */
2365  *nb_frames = 1;
2366 
/* Frame arrives slightly early but still overlaps its slot: clip it to
 * the expected position instead of dropping/duplicating. */
2367  if (delta0 < 0 &&
2368  delta > 0 &&
2371  && fps->vsync_method != VSYNC_DROP
2372 #endif
2373  ) {
2374  if (delta0 < -0.6) {
2375  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2376  } else
2377  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2378  sync_ipts = ofp->next_pts;
2379  duration += delta0;
2380  delta0 = 0;
2381  }
2382 
2383  switch (fps->vsync_method) {
2384  case VSYNC_VSCFR:
2385  if (fps->frame_number == 0 && delta0 >= 0.5) {
2386  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2387  delta = duration;
2388  delta0 = 0;
2389  ofp->next_pts = llrint(sync_ipts);
2390  }
/* fallthrough to CFR handling */
2391  case VSYNC_CFR:
2392  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2393  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2394  *nb_frames = 0;
2395  } else if (delta < -1.1)
2396  *nb_frames = 0;
2397  else if (delta > 1.1) {
2398  *nb_frames = llrintf(delta);
2399  if (delta0 > 1.1)
2400  *nb_frames_prev = llrintf(delta0 - 0.6);
2401  }
2402  frame->duration = 1;
2403  break;
2404  case VSYNC_VFR:
2405  if (delta <= -0.6)
2406  *nb_frames = 0;
2407  else if (delta > 0.6)
2408  ofp->next_pts = llrint(sync_ipts);
2409  frame->duration = llrint(duration);
2410  break;
2411 #if FFMPEG_OPT_VSYNC_DROP
2412  case VSYNC_DROP:
2413 #endif
2414  case VSYNC_PASSTHROUGH:
2415  ofp->next_pts = llrint(sync_ipts);
2416  frame->duration = llrint(duration);
2417  break;
2418  default:
2419  av_assert0(0);
2420  }
2421 
2422 finish:
/* Shift the prev-repeat history window and record this decision. */
2423  memmove(fps->frames_prev_hist + 1,
2424  fps->frames_prev_hist,
2425  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2426  fps->frames_prev_hist[0] = *nb_frames_prev;
2427 
2428  if (*nb_frames_prev == 0 && fps->last_dropped) {
2429  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2430  av_log(ofp, AV_LOG_VERBOSE,
2431  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2432  fps->frame_number, fps->last_frame->pts);
2433  }
2434  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2435  uint64_t nb_frames_dup;
2436  if (*nb_frames > dts_error_threshold * 30) {
2437  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2438  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2439  *nb_frames = 0;
2440  return;
2441  }
2442  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2443  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2444  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2445  if (nb_frames_dup > fps->dup_warning) {
2446  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2447  fps->dup_warning *= 10;
2448  }
2449  }
2450 
2451  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2452  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2453 }
2454 
// close_output(): called when an output never produced any frame before EOF.
// Sends one unreffed "dummy" frame carrying only stream parameters so the
// encoder can still be initialized, then sends NULL (EOF) to consumers.
// NOTE(review): the signature line (orig. 2455) is missing from this extract;
// body references ofp, fgt and fgp — presumably (OutputFilterPriv*, FilterGraphThread*).
2456 {
2458  int ret;
2459 
2460  // we are finished and no frames were ever seen at this output,
2461  // at least initialize the encoder with a dummy frame
2462  if (!fgt->got_frame) {
2463  AVFrame *frame = fgt->frame;
2464  FrameData *fd;
2465 
// Copy the negotiated output parameters onto the parameter-only frame.
2466  frame->time_base = ofp->tb_out;
2467  frame->format = ofp->format;
2468 
2469  frame->width = ofp->width;
2470  frame->height = ofp->height;
2471  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2472 
2473  frame->sample_rate = ofp->sample_rate;
2474  if (ofp->ch_layout.nb_channels) {
2475  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2476  if (ret < 0)
2477  return ret;
2478  }
// Replace any stale side data with the output filter's side data.
2479  av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);
2480  ret = clone_side_data(&frame->side_data, &frame->nb_side_data,
2481  ofp->side_data, ofp->nb_side_data, 0);
2482  if (ret < 0)
2483  return ret;
2484 
2485  fd = frame_data(frame);
2486  if (!fd)
2487  return AVERROR(ENOMEM);
2488 
2489  fd->frame_rate_filter = ofp->fps.framerate;
2490 
// The frame must carry no data buffer — it is parameters only.
2491  av_assert0(!frame->buf[0]);
2492 
2493  av_log(ofp, AV_LOG_WARNING,
2494  "No filtered frames for output stream, trying to "
2495  "initialize anyway.\n");
2496 
2497  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame);
2498  if (ret < 0) {
// NOTE(review): one line (orig. 2499) is missing here in this extract —
// presumably an av_frame_unref(frame) before the error return; confirm upstream.
2500  return ret;
2501  }
2502  }
2503 
2504  fgt->eof_out[ofp->ofilter.index] = 1;
2505 
// NULL frame signals EOF to the scheduler; EOF from the send is not an error.
2506  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL);
2507  return (ret == AVERROR_EOF) ? 0 : ret;
2508 }
2509 
// fg_output_frame(): push one filtered frame (or EOF when frame==NULL) to this
// output's consumers, applying video sync (dup/drop) for video streams and
// pts/duration rescaling for audio.
// NOTE(review): the first signature line (orig. 2510) is missing from this
// extract — presumably fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, ...).
2511  AVFrame *frame)
2512 {
// NOTE(review): orig. line 2513 missing — presumably the fgp = fgp_from_fg(...) declaration.
2514  AVFrame *frame_prev = ofp->fps.last_frame;
2515  enum AVMediaType type = ofp->ofilter.type;
2516 
// Default: emit the frame once (or zero times when frame is NULL / EOF).
2517  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2518 
2519  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2520  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2521 
2522  for (int64_t i = 0; i < nb_frames; i++) {
2523  AVFrame *frame_out;
2524  int ret;
2525 
2526  if (type == AVMEDIA_TYPE_VIDEO) {
// First nb_frames_prev iterations duplicate the PREVIOUS frame (VFR dup).
2527  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2528  frame_prev : frame;
2529  if (!frame_in)
2530  break;
2531 
2532  frame_out = fgp->frame_enc;
2533  ret = av_frame_ref(frame_out, frame_in);
2534  if (ret < 0)
2535  return ret;
2536 
2537  frame_out->pts = ofp->next_pts;
2538 
// If a keyframe was dropped earlier, promote this frame to a keyframe.
2539  if (ofp->fps.dropped_keyframe) {
2540  frame_out->flags |= AV_FRAME_FLAG_KEY;
2541  ofp->fps.dropped_keyframe = 0;
2542  }
2543  } else {
// Audio: rescale pts into the output timebase.
// NOTE(review): orig. line 2546 (the subtracted ts_offset term) is missing here.
2544  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2545  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2547 
2548  frame->time_base = ofp->tb_out;
2549  frame->duration = av_rescale_q(frame->nb_samples,
2550  (AVRational){ 1, frame->sample_rate },
2551  ofp->tb_out);
2552 
2553  ofp->next_pts = frame->pts + frame->duration;
2554 
2555  frame_out = frame;
2556  }
2557 
2558  // send the frame to consumers
2559  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out);
2560  if (ret < 0) {
2561  av_frame_unref(frame_out);
2562 
// Consumers rejected the frame: mark this output finished exactly once.
2563  if (!fgt->eof_out[ofp->ofilter.index]) {
2564  fgt->eof_out[ofp->ofilter.index] = 1;
2565  fgp->nb_outputs_done++;
2566  }
2567 
2568  return ret == AVERROR_EOF ? 0 : ret;
2569  }
2570 
2571  if (type == AVMEDIA_TYPE_VIDEO) {
2572  ofp->fps.frame_number++;
2573  ofp->next_pts++;
2574 
// Only the first copy of a duplicated frame keeps the keyframe flag.
2575  if (i == nb_frames_prev && frame)
2576  frame->flags &= ~AV_FRAME_FLAG_KEY;
2577  }
2578 
2579  fgt->got_frame = 1;
2580  }
2581 
// Remember the current frame as "previous" for future duplication.
2582  if (frame && frame_prev) {
2583  av_frame_unref(frame_prev);
2584  av_frame_move_ref(frame_prev, frame);
2585  }
2586 
2587  if (!frame)
2588  return close_output(ofp, fgt);
2589 
2590  return 0;
2591 }
2592 
// fg_output_step(): pull one frame from this output's buffersink and forward
// it via fg_output_frame(). Returns 1 when no (more) frames are available,
// 0 when a frame was processed, <0 on error.
// NOTE(review): several lines are missing from this extract (the signature
// head orig. 2594's partner, the buffersink get-frame call at 2601-02, the
// unref calls at 2616/2632/2639, the ts_offset rescale at 2643, and the frame
// rate query at 2652) — confirm against upstream ffmpeg_filter.c.
2594  AVFrame *frame)
2595 {
2598  FrameData *fd;
2599  int ret;
2600 
// ret comes from the (elided) av_buffersink_get_frame_flags() call above.
2603  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) {
// Sink reached EOF: flush the output by sending a NULL frame once.
2604  ret = fg_output_frame(ofp, fgt, NULL);
2605  return (ret < 0) ? ret : 1;
2606  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2607  return 1;
2608  } else if (ret < 0) {
2609  av_log(ofp, AV_LOG_WARNING,
2610  "Error in retrieving a frame from the filtergraph: %s\n",
2611  av_err2str(ret));
2612  return ret;
2613  }
2614 
// Output already closed: discard anything the sink still produces.
2615  if (fgt->eof_out[ofp->ofilter.index]) {
2617  return 0;
2618  }
2619 
2621 
2622  if (debug_ts)
2623  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2624  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2625  frame->time_base.num, frame->time_base.den);
2626 
2627  // Choose the output timebase the first time we get a frame.
2628  if (!ofp->tb_out_locked) {
2629  ret = choose_out_timebase(ofp, frame);
2630  if (ret < 0) {
2631  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2633  return ret;
2634  }
2635  }
2636 
2637  fd = frame_data(frame);
2638  if (!fd) {
2640  return AVERROR(ENOMEM);
2641  }
2642 
2644 
2645  // only use bits_per_raw_sample passed through from the decoder
2646  // if the filtergraph did not touch the frame data
2647  if (!fgp->is_meta)
2648  fd->bits_per_raw_sample = 0;
2649 
2650  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
// Synthesize a duration from the (elided) sink frame rate when absent.
2651  if (!frame->duration) {
2653  if (fr.num > 0 && fr.den > 0)
2654  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2655  }
2656 
2657  fd->frame_rate_filter = ofp->fps.framerate;
2658  }
2659 
2660  ret = fg_output_frame(ofp, fgt, frame);
2662  if (ret < 0)
2663  return ret;
2664 
2665  return 0;
2666 }
2667 
2668 /* retrieve all frames available at filtergraph outputs
2669  * and send them to consumers */
// Returns 0 when the caller should wait for more input (fgt->next_in set),
// AVERROR_EOF when all outputs are done, <0 on error.
// NOTE(review): the signature head (orig. 2670) and a few interior lines
// (2679 ifp decl, 2698 ofp decl, 2712 the request_oldest call) are missing
// from this extract — confirm against upstream ffmpeg_filter.c.
2671  AVFrame *frame)
2672 {
2673  FilterGraphPriv *fgp = fgp_from_fg(fg);
2674  int did_step = 0;
2675 
2676  // graph not configured, just select the input to request
2677  if (!fgt->graph) {
2678  for (int i = 0; i < fg->nb_inputs; i++) {
// Pick the first input whose format is still unknown and not yet at EOF.
2680  if (ifp->format < 0 && !fgt->eof_in[i]) {
2681  fgt->next_in = i;
2682  return 0;
2683  }
2684  }
2685 
2686  // This state - graph is not configured, but all inputs are either
2687  // initialized or EOF - should be unreachable because sending EOF to a
2688  // filter without even a fallback format should fail
2689  av_assert0(0);
2690  return AVERROR_BUG;
2691  }
2692 
2693  while (fgp->nb_outputs_done < fg->nb_outputs) {
2694  int ret;
2695 
2696  /* Reap all buffers present in the buffer sinks */
2697  for (int i = 0; i < fg->nb_outputs; i++) {
2699 
// fg_output_step() returns 0 while frames keep coming; loop until drained.
2700  ret = 0;
2701  while (!ret) {
2702  ret = fg_output_step(ofp, fgt, frame);
2703  if (ret < 0)
2704  return ret;
2705  }
2706  }
2707 
2708  // return after one iteration, so that scheduler can rate-control us
2709  if (did_step && fgp->have_sources)
2710  return 0;
2711 
// ret comes from the (elided) avfilter_graph_request_oldest() call above.
2713  if (ret == AVERROR(EAGAIN)) {
2714  fgt->next_in = choose_input(fg, fgt);
2715  return 0;
2716  } else if (ret < 0) {
2717  if (ret == AVERROR_EOF)
2718  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2719  else
2720  av_log(fg, AV_LOG_ERROR,
2721  "Error requesting a frame from the filtergraph: %s\n",
2722  av_err2str(ret));
2723  return ret;
2724  }
// nb_inputs as next_in means "no specific input requested".
2725  fgt->next_in = fg->nb_inputs;
2726 
2727  did_step = 1;
2728  }
2729 
2730  return AVERROR_EOF;
2731 }
2732 
// sub2video_heartbeat(): keep the sub2video overlay alive while other streams
// advance — either re-pushes the current overlaid subpicture or triggers an
// update when its display window ended.
// NOTE(review): the signature line (orig. 2733) is missing from this extract;
// body uses (InputFilter *ifilter, int64_t pts, AVRational tb) — confirm upstream.
2734 {
2735  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2736  int64_t pts2;
2737 
2738  /* subtitles seem to be usually muxed ahead of other streams;
2739  if not, subtracting a larger time here is necessary */
2740  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2741 
2742  /* do not send the heartbeat frame if the subtitle is already ahead */
2743  if (pts2 <= ifp->sub2video.last_pts)
2744  return;
2745 
2746  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2747  /* if we have hit the end of the current displayed subpicture,
2748  or if we need to initialize the system, update the
2749  overlaid subpicture and its start/end times */
2750  sub2video_update(ifp, pts2 + 1, NULL);
2751  else
2752  sub2video_push_ref(ifp, pts2);
2753 }
2754 
// sub2video_frame(): feed a subtitle frame into the sub2video machinery.
// buffer!=0 queues the frame until the graph is configured; a frame with no
// data buffer is a heartbeat; NULL frame means EOF.
2755 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2756 {
2757  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2758  int ret;
2759 
2760  if (buffer) {
2761  AVFrame *tmp;
2762 
2763  if (!frame)
2764  return 0;
2765 
2766  tmp = av_frame_alloc();
2767  if (!tmp)
2768  return AVERROR(ENOMEM);
2769 
// NOTE(review): one line (orig. 2770) is missing from this extract —
// presumably av_frame_move_ref(tmp, frame); confirm upstream.
2771 
2772  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2773  if (ret < 0) {
2774  av_frame_free(&tmp);
2775  return ret;
2776  }
2777 
2778  return 0;
2779  }
2780 
2781  // heartbeat frame
2782  if (frame && !frame->buf[0]) {
2783  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2784  return 0;
2785  }
2786 
// EOF: flush the current subpicture, then send EOF into the buffer source.
2787  if (!frame) {
2788  if (ifp->sub2video.end_pts < INT64_MAX)
2789  sub2video_update(ifp, INT64_MAX, NULL);
2790 
2791  return av_buffersrc_add_frame(ifilter->filter, NULL);
2792  }
2793 
// Track the largest known canvas dimensions from incoming frames.
2794  ifp->width = frame->width ? frame->width : ifp->width;
2795  ifp->height = frame->height ? frame->height : ifp->height;
2796 
// frame->buf[0]->data carries an AVSubtitle for sub2video inputs.
2797  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2798 
2799  return 0;
2800 }
2801 
// send_eof(): propagate EOF on one input. If the input's buffersrc exists the
// EOF pts is forwarded into it; otherwise fallback parameters are installed so
// the graph can still be configured.
// NOTE(review): several lines are missing from this extract (2815 rounding
// flags, 2817 the av_buffersrc_close call, 2827 sample_aspect_ratio, 2833 the
// av_channel_layout_copy head, 2838 side-data free) — confirm upstream.
2802 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2803  int64_t pts, AVRational tb)
2804 {
2805  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2806  int ret;
2807 
// EOF already sent for this input — nothing to do.
2808  if (fgt->eof_in[ifilter->index])
2809  return 0;
2810 
2811  fgt->eof_in[ifilter->index] = 1;
2812 
2813  if (ifilter->filter) {
2814  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2816 
2818  if (ret < 0)
2819  return ret;
2820  } else {
2821  if (ifp->format < 0) {
2822  // the filtergraph was never configured, use the fallback parameters
2823  ifp->format = ifp->opts.fallback->format;
2824  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2825  ifp->width = ifp->opts.fallback->width;
2826  ifp->height = ifp->opts.fallback->height;
2828  ifp->color_space = ifp->opts.fallback->colorspace;
2829  ifp->color_range = ifp->opts.fallback->color_range;
2830  ifp->alpha_mode = ifp->opts.fallback->alpha_mode;
2831  ifp->time_base = ifp->opts.fallback->time_base;
2832 
2834  &ifp->opts.fallback->ch_layout);
2835  if (ret < 0)
2836  return ret;
2837 
2839  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
2840  ifp->opts.fallback->side_data,
2841  ifp->opts.fallback->nb_side_data, 0);
2842  if (ret < 0)
2843  return ret;
2844 
// With the fallback installed, try configuring the graph right away.
2845  if (ifilter_has_all_input_formats(ifilter->graph)) {
2846  ret = configure_filtergraph(ifilter->graph, fgt);
2847  if (ret < 0) {
2848  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
2849  return ret;
2850  }
2851  }
2852  }
2853 
// No fallback format available either — the input format is unknowable.
2854  if (ifp->format < 0) {
2855  av_log(ifilter->graph, AV_LOG_ERROR,
2856  "Cannot determine format of input %s after EOF\n",
2857  ifp->opts.name);
2858  return AVERROR_INVALIDDATA;
2859  }
2860  }
2861 
2862  return 0;
2863 }
2864 
// Bitmask of input-parameter change reasons; send_frame() ORs these into
// need_reinit to decide whether (and report why) the graph is reconfigured.
// NOTE(review): the opening "enum ... {" line (orig. 2865) is missing from this extract.
2866  VIDEO_CHANGED = (1 << 0),
2867  AUDIO_CHANGED = (1 << 1),
2868  MATRIX_CHANGED = (1 << 2),
2869  DOWNMIX_CHANGED = (1 << 3),
2870  HWACCEL_CHANGED = (1 << 4)
2871 };
2872 
/**
 * Map a NULL string to the literal "unknown", otherwise pass it through.
 * Used when logging format/colorspace names that may be unavailable.
 */
static const char *unknown_if_null(const char *str)
{
    if (!str)
        return "unknown";
    return str;
}
2877 
// send_frame(): push one decoded frame into an input's buffersrc, detecting
// parameter changes that require graph reconfiguration (or frame dropping
// when IFILTER_FLAG_DROPCHANGED is set).
// NOTE(review): the signature head (orig. 2878) and several interior lines
// (2905/2912 side-data fetches, 2934 parameter import, 2947 frame move,
// 2963 bprint init, 3003 displaymatrix removal, 3008-3013 the buffersrc add
// call) are missing from this extract — confirm against upstream.
2879  InputFilter *ifilter, AVFrame *frame)
2880 {
2881  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2882  FrameData *fd;
2883  AVFrameSideData *sd;
2884  int need_reinit = 0, ret;
2885 
2886  /* determine if the parameters for this input changed */
2887  switch (ifilter->type) {
2888  case AVMEDIA_TYPE_AUDIO:
2889  if (ifp->format != frame->format ||
2890  ifp->sample_rate != frame->sample_rate ||
2891  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2892  need_reinit |= AUDIO_CHANGED;
2893  break;
2894  case AVMEDIA_TYPE_VIDEO:
2895  if (ifp->format != frame->format ||
2896  ifp->width != frame->width ||
2897  ifp->height != frame->height ||
2898  ifp->color_space != frame->colorspace ||
2899  ifp->color_range != frame->color_range ||
2900  ifp->alpha_mode != frame->alpha_mode)
2901  need_reinit |= VIDEO_CHANGED;
2902  break;
2903  }
2904 
// sd is set by the (elided) display-matrix side-data lookup above.
2906  if (!ifp->displaymatrix_present ||
2907  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2908  need_reinit |= MATRIX_CHANGED;
2909  } else if (ifp->displaymatrix_present)
2910  need_reinit |= MATRIX_CHANGED;
2911 
// Same pattern for downmix-info side data (elided lookup above).
2913  if (!ifp->downmixinfo_present ||
2914  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
2915  need_reinit |= DOWNMIX_CHANGED;
2916  } else if (ifp->downmixinfo_present)
2917  need_reinit |= DOWNMIX_CHANGED;
2918 
// DROPCHANGED mode: drop mismatched frames instead of reconfiguring.
2919  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
2920  ifp->nb_dropped++;
2921  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
2923  return 0;
2924  }
2925 
2926  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2927  need_reinit = 0;
2928 
2929  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2930  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2931  need_reinit |= HWACCEL_CHANGED;
2932 
2933  if (need_reinit) {
// ret comes from the (elided) ifilter_parameters_from_frame() call above.
2935  if (ret < 0)
2936  return ret;
2937  }
2938 
2939  /* (re)init the graph if possible, otherwise buffer the frame and return */
2940  if (need_reinit || !fgt->graph) {
2941  AVFrame *tmp = av_frame_alloc();
2942 
2943  if (!tmp)
2944  return AVERROR(ENOMEM);
2945 
// Not all inputs have known formats yet: queue this frame for later.
2946  if (!ifilter_has_all_input_formats(fg)) {
2948 
2949  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2950  if (ret < 0)
2951  av_frame_free(&tmp);
2952 
2953  return ret;
2954  }
2955 
// Drain the old graph before tearing it down for reconfiguration.
2956  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2957  av_frame_free(&tmp);
2958  if (ret < 0)
2959  return ret;
2960 
// Build a human-readable reason string for the reconfiguration log line.
2961  if (fgt->graph) {
2962  AVBPrint reason;
2964  if (need_reinit & AUDIO_CHANGED) {
2965  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2966  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2967  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2968  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2969  }
2970  if (need_reinit & VIDEO_CHANGED) {
2971  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2972  const char *color_space_name = av_color_space_name(frame->colorspace);
2973  const char *color_range_name = av_color_range_name(frame->color_range);
2974  const char *alpha_mode = av_alpha_mode_name(frame->alpha_mode);
2975  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, %s alpha,",
2976  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2977  unknown_if_null(color_space_name), frame->width, frame->height,
2978  unknown_if_null(alpha_mode));
2979  }
2980  if (need_reinit & MATRIX_CHANGED)
2981  av_bprintf(&reason, "display matrix changed, ");
2982  if (need_reinit & DOWNMIX_CHANGED)
// NOTE(review): "medatata" is a typo for "metadata" in this log string; it is
// runtime output, so it must be fixed as a code change upstream, not here.
2983  av_bprintf(&reason, "downmix medatata changed, ");
2984  if (need_reinit & HWACCEL_CHANGED)
2985  av_bprintf(&reason, "hwaccel changed, ");
2986  if (reason.len > 1)
2987  reason.str[reason.len - 2] = '\0'; // remove last comma
2988  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2989  }
2990 
2991  ret = configure_filtergraph(fg, fgt);
2992  if (ret < 0) {
2993  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2994  return ret;
2995  }
2996  }
2997 
// Rescale timestamps into the input filter's timebase before sending.
2998  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2999  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
3000  frame->time_base = ifp->time_base;
3001 
3002  if (ifp->displaymatrix_applied)
3004 
3005  fd = frame_data(frame);
3006  if (!fd)
3007  return AVERROR(ENOMEM);
3009 
// ret comes from the (elided) av_buffersrc_add_frame_flags() call above.
3012  if (ret < 0) {
3014  if (ret != AVERROR_EOF)
3015  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
3016  return ret;
3017  }
3018 
3019  return 0;
3020 }
3021 
// fg_thread_set_name(): build a short thread name — "?f<output>" for simple
// graphs, "fc<index>" for complex ones — and apply it via the (elided) setter.
// NOTE(review): orig. lines 3028 (the media-type character argument) and 3034
// (the ff_thread_setname call) are missing from this extract.
3022 static void fg_thread_set_name(const FilterGraph *fg)
3023 {
3024  char name[16];
3025  if (filtergraph_is_simple(fg)) {
3026  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
3027  snprintf(name, sizeof(name), "%cf%s",
3029  ofp->ofilter.output_name);
3030  } else {
3031  snprintf(name, sizeof(name), "fc%d", fg->index);
3032  }
3033 
3035 }
3036 
// fg_thread_uninit(): release all per-thread filtergraph state — drain and
// free the queued output frames, free the scratch frame and EOF arrays, free
// the graph, and zero the struct for safe reuse.
// NOTE(review): the signature line (orig. 3037) and the fifo free call
// (orig. 3043) are missing from this extract.
3038 {
3039  if (fgt->frame_queue_out) {
3040  AVFrame *frame;
3041  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
3042  av_frame_free(&frame);
3044  }
3045 
3046  av_frame_free(&fgt->frame);
3047  av_freep(&fgt->eof_in);
3048  av_freep(&fgt->eof_out);
3049 
3050  avfilter_graph_free(&fgt->graph);
3051 
3052  memset(fgt, 0, sizeof(*fgt));
3053 }
3054 
// fg_thread_init(): allocate per-thread filtergraph state (scratch frame,
// per-input/per-output EOF flags, output frame queue). On any allocation
// failure everything is released via fg_thread_uninit() and ENOMEM returned.
3055 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3056 {
3057  memset(fgt, 0, sizeof(*fgt));
3058 
3059  fgt->frame = av_frame_alloc();
3060  if (!fgt->frame)
3061  goto fail;
3062 
3063  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
3064  if (!fgt->eof_in)
3065  goto fail;
3066 
3067  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
3068  if (!fgt->eof_out)
3069  goto fail;
3070 
// NOTE(review): orig. line 3071 (the frame_queue_out allocation, presumably
// av_fifo_alloc2) is missing from this extract — confirm upstream.
3072  if (!fgt->frame_queue_out)
3073  goto fail;
3074 
3075  return 0;
3076 
3077 fail:
3078  fg_thread_uninit(fgt);
3079  return AVERROR(ENOMEM);
3080 }
3081 
// filter_thread(): main loop of a filtergraph worker. Receives input frames,
// EOFs and control commands from the scheduler, feeds them into the graph and
// forwards filtered output to consumers until all sides reach EOF.
// NOTE(review): orig. lines 3097 (the has-all-formats condition), 3152,
// 3181 (ofp decl) and 3193 (the print_filtergraph guard) are missing from
// this extract — confirm against upstream.
3082 static int filter_thread(void *arg)
3083 {
3084  FilterGraphPriv *fgp = arg;
3085  FilterGraph *fg = &fgp->fg;
3086 
3087  FilterGraphThread fgt;
3088  int ret = 0, input_status = 0;
3089 
3090  ret = fg_thread_init(&fgt, fg);
3091  if (ret < 0)
3092  goto finish;
3093 
3094  fg_thread_set_name(fg);
3095 
3096  // if we have all input parameters the graph can now be configured
3098  ret = configure_filtergraph(fg, &fgt);
3099  if (ret < 0) {
3100  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
3101  av_err2str(ret));
3102  goto finish;
3103  }
3104  }
3105 
3106  while (1) {
3107  InputFilter *ifilter;
3108  InputFilterPriv *ifp = NULL;
3109  enum FrameOpaque o;
3110  unsigned input_idx = fgt.next_in;
3111 
3112  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
3113  &input_idx, fgt.frame);
3114  if (input_status == AVERROR_EOF) {
3115  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3116  break;
3117  } else if (input_status == AVERROR(EAGAIN)) {
3118  // should only happen when we didn't request any input
3119  av_assert0(input_idx == fg->nb_inputs);
3120  goto read_frames;
3121  }
3122  av_assert0(input_status >= 0);
3123 
3124  o = (intptr_t)fgt.frame->opaque;
3125 
// NOTE(review): this assignment duplicates the one two lines above (orig.
// 3124 and 3126 are identical); the second is redundant and can be removed.
3126  o = (intptr_t)fgt.frame->opaque;
3127 
3128  // message on the control stream
3129  if (input_idx == fg->nb_inputs) {
3130  FilterCommand *fc;
3131 
3132  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3133 
3134  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3135  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3136  fc->all_filters);
3137  av_frame_unref(fgt.frame);
3138  continue;
3139  }
3140 
3141  // we received an input frame or EOF
3142  ifilter = fg->inputs[input_idx];
3143  ifp = ifp_from_ifilter(ifilter);
3144 
3145  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3146  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3147  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3148  !fgt.graph);
3149  } else if (fgt.frame->buf[0]) {
3150  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3151  } else {
// No data buffer on a non-subtitle input means EOF for that input.
3153  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3154  }
3155  av_frame_unref(fgt.frame);
3156  if (ret == AVERROR_EOF) {
3157  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3158  input_idx);
3159  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
3160  continue;
3161  }
3162  if (ret < 0)
3163  goto finish;
3164 
3165 read_frames:
3166  // retrieve all newly available frames
3167  ret = read_frames(fg, &fgt, fgt.frame);
3168  if (ret == AVERROR_EOF) {
3169  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3170  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
3171  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
3172  break;
3173  } else if (ret < 0) {
3174  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3175  av_err2str(ret));
3176  goto finish;
3177  }
3178  }
3179 
// Flush every output that has not yet seen EOF.
3180  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3182 
3183  if (fgt.eof_out[i] || !fgt.graph)
3184  continue;
3185 
3186  ret = fg_output_frame(ofp, &fgt, NULL);
3187  if (ret < 0)
3188  goto finish;
3189  }
3190 
3191 finish:
3192 
3194  print_filtergraph(fg, fgt.graph);
3195 
3196  // EOF is normal termination
3197  if (ret == AVERROR_EOF)
3198  ret = 0;
3199 
3200  fg_thread_uninit(&fgt);
3201 
3202  return ret;
3203 }
3204 
3205 void fg_send_command(FilterGraph *fg, double time, const char *target,
3206  const char *command, const char *arg, int all_filters)
3207 {
3208  FilterGraphPriv *fgp = fgp_from_fg(fg);
3209  AVBufferRef *buf;
3210  FilterCommand *fc;
3211 
3212  fc = av_mallocz(sizeof(*fc));
3213  if (!fc)
3214  return;
3215 
3216  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3217  if (!buf) {
3218  av_freep(&fc);
3219  return;
3220  }
3221 
3222  fc->target = av_strdup(target);
3223  fc->command = av_strdup(command);
3224  fc->arg = av_strdup(arg);
3225  if (!fc->target || !fc->command || !fc->arg) {
3226  av_buffer_unref(&buf);
3227  return;
3228  }
3229 
3230  fc->time = time;
3231  fc->all_filters = all_filters;
3232 
3233  fgp->frame->buf[0] = buf;
3234  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3235 
3236  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3237 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:119
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2082
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1876
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:469
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:678
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:370
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:631
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:375
av_clip
#define av_clip
Definition: common.h:100
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2460
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:373
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:245
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:70
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:106
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2087
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2189
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1483
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:431
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:131
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:987
FrameData
Definition: ffmpeg.h:681
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2164
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:152
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:286
OutputFilter::apad
char * apad
Definition: ffmpeg.h:388
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:405
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3447
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:400
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:284
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:625
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2095
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1012
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:635
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2870
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1021
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:133
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1685
av_alpha_mode_name
const char * av_alpha_mode_name(enum AVAlphaMode mode)
Definition: pixdesc.c:3888
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:245
AVSubtitleRect
Definition: avcodec.h:2055
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2086
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1753
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:1002
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:172
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:565
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:689
InputFile::index
int index
Definition: ffmpeg.h:497
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:757
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:50
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:57
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:801
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2510
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:190
FilterGraph::index
int index
Definition: ffmpeg.h:398
OutputFilter::index
int index
Definition: ffmpeg.h:377
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:130
data
const char data[16]
Definition: mxf.c:149
InputFilter::index
int index
Definition: ffmpeg.h:358
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:176
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:230
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1886
OutputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:200
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:401
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2866
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:129
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:671
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:237
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:248
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:263
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1603
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:604
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2593
FilterGraphPriv
Definition: ffmpeg_filter.c:46
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1921
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:191
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1003
InputStream
Definition: ffmpeg.h:460
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:304
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:271
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:292
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3823
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:89
OutputFilterPriv
Definition: ffmpeg_filter.c:187
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3037
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:480
fail
#define fail()
Definition: checkasm.h:200
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:361
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:316
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:954
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:202
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:262
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:605
AVFrame::alpha_mode
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is to be handled.
Definition: frame.h:782
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:770
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1706
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1902
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:156
FrameData::tb
AVRational tb
Definition: ffmpeg.h:691
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:214
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:196
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:177
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:385
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:103
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:272
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:39
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:821
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2733
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:203
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:227
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:694
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:136
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2802
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1262
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:350
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:973
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:933
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:251
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:71
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:669
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:2869
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:110
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:402
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:629
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:118
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:728
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:386
InputFilter
Definition: ffmpeg.h:355
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:60
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:493
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:301
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:81
InputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:369
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2088
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3055
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:274
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:283
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:356
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:296
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:232
av_buffersink_get_alpha_mode
enum AVAlphaMode av_buffersink_get_alpha_mode(const AVFilterContext *ctx)
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1060
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:220
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:195
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:285
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3763
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVFormatContext
Format I/O context.
Definition: avformat.h:1264
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:646
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:376
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1414
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:226
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:126
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:761
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:197
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:865
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:174
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:468
tmp
static uint8_t tmp[20]
Definition: aes_ctr.c:47
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:162
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:142
Decoder
Definition: ffmpeg.h:446
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:876
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:300
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:934
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1210
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:916
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:212
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:643
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2455
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:698
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1547
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:957
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1227
AVFilterGraph
Definition: avfilter.h:589
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:146
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
InputFilterOptions
Definition: ffmpeg.h:270
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:125
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:733
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:403
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:219
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:476
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:105
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:47
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:62
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:188
FilterGraph
Definition: ffmpeg.h:396
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:946
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1501
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:78
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:288
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:363
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:754
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:800
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:280
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2158
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:66
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:264
f
f
Definition: af_crystalizer.c:122
OutputFilter::output_name
char * output_name
Definition: ffmpeg.h:381
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3082
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:140
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:91
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:144
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:223
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:107
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:751
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:590
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:210
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:188
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:719
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:127
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2868
FilterCommand::time
double time
Definition: ffmpeg_filter.c:247
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:156
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:143
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1464
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:544
AVFrameSideData::data
uint8_t * data
Definition: frame.h:284
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:58
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:460
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2085
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1470
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:179
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:725
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:379
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:124
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1895
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1417
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2867
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2395
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2873
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:289
decoders
Decoder ** decoders
Definition: ffmpeg.c:114
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:190
nb_decoders
int nb_decoders
Definition: ffmpeg.c:115
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:390
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2670
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2038
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2878
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:959
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:383
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:199
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:248
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:184
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:168
filter_buffered_frames
int filter_buffered_frames
Definition: ffmpeg_opt.c:78
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:121
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:536
FPSConvContext
Definition: ffmpeg_filter.c:165
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:696
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:133
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3205
downmix_info.h
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:52
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:294
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:209
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:69
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1825
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:690
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:182
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:80
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:90
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:499
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:560
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:116
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:267
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:624
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:693
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:111
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:308
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:198
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:919
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:60
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2505
AVFilter
Filter definition.
Definition: avfilter.h:216
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2332
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:160
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1074
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:92
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:801
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:357
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:73
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:166
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:513
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1497
AVFilterParams::opts
AVDictionary * opts
Options to be applied to the filter.
Definition: avfilter.h:917
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:231
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2865
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:524
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:992
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:88
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:215
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:499
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:750
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:266
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:373
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:617
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:117
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:464
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:932
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:138
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:114
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:55
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1446
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:265
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:183
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:615
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:936
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2212
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:234
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:221
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:330
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:285
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:135
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:958
OutputFilterPriv::alpha_modes
enum AVAlphaMode * alpha_modes
Definition: ffmpeg_filter.c:224
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:397
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:372
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:118
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:450
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2755
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1523
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:130
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:323
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:222
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:492
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2289
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:393
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:83
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:282
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:466
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2439
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:79
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:243
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:62
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1067
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:298
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:449
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:122
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:153
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:112
int32_t
int32_t
Definition: audioconvert.c:56
InputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:128
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:332
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:617
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:194
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1434
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:228
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:130
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1399
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:277
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:732
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:466
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:207
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:68
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:195
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:181
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3022
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:166
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1695
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2084
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:244
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:242
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:124
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3367
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:392
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:281
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
InputFilter::input_name
char * input_name
Definition: ffmpeg.h:365
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:104
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:183