FFmpeg
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "graph/graphprint.h"
25 
26 #include "libavfilter/avfilter.h"
27 #include "libavfilter/buffersink.h"
28 #include "libavfilter/buffersrc.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
34 #include "libavutil/downmix_info.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/time.h"
41 #include "libavutil/timestamp.h"
42 
43 // FIXME private header, used for mid_pred()
44 #include "libavcodec/mathops.h"
45 
// Private, per-filtergraph state wrapping the public FilterGraph (which is
// the first member, at original line 47 — missing from this extraction).
// The fgp_from_fg()/cfgp_from_cfg() casts below rely on that layout.
46 typedef struct FilterGraphPriv {
48 
49  // name used for logging
50  char log_name[32];
51 
// nonzero for a simple (-vf/-af style) one-input/one-output graph
52  int is_simple;
53  // true when the filtergraph contains only meta filters
54  // that do not modify the frame data
55  int is_meta;
56  // source filters are present in the graph
59 
// number of outputs that have reached EOF
60  unsigned nb_outputs_done;
61 
63 
64  // frame for temporarily holding output from the filtergraph
66  // frame for sending output to the encoder
68 
// index of this filtergraph in the scheduler
70  unsigned sch_idx;
72 
// fgp_from_fg(): downcast FilterGraph to its private wrapper; the signature
// line (original line 73) is missing from this extraction
74 {
75  return (FilterGraphPriv*)fg;
76 }
77 
78 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
79 {
80  return (const FilterGraphPriv*)fg;
81 }
82 
83 // data that is local to the filter thread and not visible outside of it
84 typedef struct FilterGraphThread {
86 
88 
89  // Temporary buffer for output frames, since on filtergraph reset
90  // we cannot send them to encoders immediately.
91  // The output index is stored in frame opaque.
93 
94  // index of the next input to request from the scheduler
95  unsigned next_in;
96  // set to 1 after at least one frame passed through this output
97  int got_frame;
98 
99  // EOF status of each input/output, as received by the thread
// one flag per input/output; arrays presumably sized by the graph's
// nb_inputs/nb_outputs -- allocation not visible in this extraction
100  uint8_t *eof_in;
101  uint8_t *eof_out;
103 
// Private, per-input state; wraps the public InputFilter (first member, on a
// line missing from this extraction) so ifp_from_ifilter() can downcast.
104 typedef struct InputFilterPriv {
106 
108 
109  // used to hold submitted input
111 
112  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
113  // same as type otherwise
115 
// EOF was received on this input
116  int eof;
// input has been bound to a source (stream/decoder/filtergraph output);
// enforced by av_assert0(!ifp->bound) in the ifilter_bind_* functions
117  int bound;
119  uint64_t nb_dropped;
120 
121  // parameters configured for this input
// negative while unknown; gates graph configuration
// (see ifilter_has_all_input_formats())
122  int format;
123 
124  int width, height;
128 
131 
133 
136 
138 
140 
144 
147 
// state for rendering subtitles onto a video canvas (the "sub2video" hack)
148  struct {
149  AVFrame *frame;
150 
153 
154  /// marks if sub2video_update should force an initialization
155  unsigned int initialize;
156  } sub2video;
158 
// ifp_from_ifilter(): downcast InputFilter to its private wrapper; the
// signature line (original line 159) is missing from this extraction
160 {
161  return (InputFilterPriv*)ifilter;
162 }
163 
// State for the video frame-rate conversion / vsync logic applied to a
// video output (frame duplication/dropping bookkeeping and limits).
164 typedef struct FPSConvContext {
166  /* number of frames emitted by the video-encoding sync code */
168  /* history of nb_frames_prev, i.e. the number of times the
169  * previous frame was duplicated by vsync code in recent
170  * do_video_out() calls */
172 
// threshold at which repeated duplication triggers a warning;
// initialized to 1000 in ofilter_bind_enc()
173  uint64_t dup_warning;
174 
177 
179 
185 
// Private, per-output state; wraps the public OutputFilter (first member,
// on a line missing from this extraction) for ofp_from_ofilter().
186 typedef struct OutputFilterPriv {
188 
// parent context for log-message chaining (NULL for simple graphs,
// see ofilter_bind_enc())
189  void *log_parent;
190  char log_name[32];
191 
192  /* desired output stream properties */
// -1 while undecided (set in ofilter_alloc())
193  int format;
194  int width, height;
199 
202 
203  // time base in which the output is sent to our downstream
204  // does not need to match the filtersink's timebase
206  // at least one frame with the above timebase was sent
207  // to our downstream, so it cannot change anymore
209 
211 
214 
215  // those are only set if no format is specified and the encoder gives us multiple options
216  // They point directly to the relevant lists of the encoder.
217  const int *formats;
219  const int *sample_rates;
222 
226  // offset for output timestamps, in AV_TIME_BASE_Q
230 
231  unsigned flags;
233 
// ofp_from_ofilter(): downcast OutputFilter to its private wrapper; the
// signature line (original line 234) is missing from this extraction
235 {
236  return (OutputFilterPriv*)ofilter;
237 }
238 
// A queued filter command: which filter instance to address, the command
// name, its argument, and when to apply it.
239 typedef struct FilterCommand {
// name of the filter instance the command is addressed to
240  char *target;
241  char *command;
242  char *arg;
243 
// time at which the command should be applied, in seconds
244  double time;
246 } FilterCommand;
247 
// Free callback for an AVBufferRef carrying a FilterCommand: releases the
// strings owned by the command, then the command struct itself.
// (The declaration of `fc` -- presumably a cast of `data` to
// FilterCommand * -- is on original line 250, missing from this view.)
248 static void filter_command_free(void *opaque, uint8_t *data)
249 {
251 
252  av_freep(&fc->target);
253  av_freep(&fc->command);
254  av_freep(&fc->arg);
255 
256  av_free(data);
257 }
258 
// (Re)initialize the sub2video canvas frame with the configured geometry and
// format, then zero its first plane so it starts out blank.
// Returns 0 on success, a negative AVERROR on failure. The signature
// (original line 259) is missing from this extraction.
260 {
261  AVFrame *frame = ifp->sub2video.frame;
262  int ret;
263 
265 
266  frame->width = ifp->width;
267  frame->height = ifp->height;
268  frame->format = ifp->format;
269  frame->colorspace = ifp->color_space;
270  frame->color_range = ifp->color_range;
271 
// ret presumably comes from an av_frame_get_buffer() call on the line
// missing here (original line 272) -- TODO confirm
273  if (ret < 0)
274  return ret;
275 
// blank the canvas; one memset covers the whole plane via linesize
276  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
277 
278  return 0;
279 }
280 
281 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
282  AVSubtitleRect *r)
283 {
284  uint32_t *pal, *dst2;
285  uint8_t *src, *src2;
286  int x, y;
287 
288  if (r->type != SUBTITLE_BITMAP) {
289  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
290  return;
291  }
292  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
293  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
294  r->x, r->y, r->w, r->h, w, h
295  );
296  return;
297  }
298 
299  dst += r->y * dst_linesize + r->x * 4;
300  src = r->data[0];
301  pal = (uint32_t *)r->data[1];
302  for (y = 0; y < r->h; y++) {
303  dst2 = (uint32_t *)dst;
304  src2 = src;
305  for (x = 0; x < r->w; x++)
306  *(dst2++) = pal[*(src2++)];
307  dst += dst_linesize;
308  src += r->linesize[0];
309  }
310 }
311 
// Submit the current sub2video canvas frame to its buffersrc with the given
// pts, remembering it as the last pushed timestamp. The signature (original
// line 312) and the buffersrc call (lines 319-321) are missing from this
// extraction; `ret` presumably comes from av_buffersrc_add_frame_flags().
313 {
314  AVFrame *frame = ifp->sub2video.frame;
315  int ret;
316 
// the canvas must have been allocated by sub2video_get_blank_frame()
317  av_assert1(frame->data[0]);
318  ifp->sub2video.last_pts = frame->pts = pts;
// EOF from the buffersrc is tolerated; any other error is only logged
322  if (ret != AVERROR_EOF && ret < 0)
324  "Error while add the frame to buffer source(%s).\n",
325  av_err2str(ret));
326 }
327 
328 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
329  const AVSubtitle *sub)
330 {
331  AVFrame *frame = ifp->sub2video.frame;
332  int8_t *dst;
333  int dst_linesize;
334  int num_rects;
335  int64_t pts, end_pts;
336 
337  if (sub) {
338  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
339  AV_TIME_BASE_Q, ifp->time_base);
340  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
341  AV_TIME_BASE_Q, ifp->time_base);
342  num_rects = sub->num_rects;
343  } else {
344  /* If we are initializing the system, utilize current heartbeat
345  PTS as the start time, and show until the following subpicture
346  is received. Otherwise, utilize the previous subpicture's end time
347  as the fall-back value. */
348  pts = ifp->sub2video.initialize ?
349  heartbeat_pts : ifp->sub2video.end_pts;
350  end_pts = INT64_MAX;
351  num_rects = 0;
352  }
353  if (sub2video_get_blank_frame(ifp) < 0) {
355  "Impossible to get a blank canvas.\n");
356  return;
357  }
358  dst = frame->data [0];
359  dst_linesize = frame->linesize[0];
360  for (int i = 0; i < num_rects; i++)
361  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
362  sub2video_push_ref(ifp, pts);
363  ifp->sub2video.end_pts = end_pts;
364  ifp->sub2video.initialize = 0;
365 }
366 
367 /* Define a function for appending a list of allowed formats
368  * to an AVBPrint. If nonempty, the list will have a header. */
// The generated choose_<name>() emits "<name>=" followed by either the
// single configured value, or a '|'-separated dump of the supported list
// (with the trailing '|' trimmed), and ends with the ':' separator expected
// by the buffersink args string. It emits nothing when neither is known.
369 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
370 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
371 { \
372  if (ofp->var == none && !ofp->supported_list) \
373  return; \
374  av_bprintf(bprint, #name "="); \
375  if (ofp->var != none) { \
376  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
377  } else { \
378  const type *p; \
379  \
380  for (p = ofp->supported_list; *p != none; p++) { \
381  av_bprintf(bprint, printf_format "|", get_name(*p)); \
382  } \
383  if (bprint->len > 0) \
384  bprint->str[--bprint->len] = '\0'; \
385  } \
386  av_bprint_chars(bprint, ':', 1); \
387 }
388 
// Instantiations of DEF_CHOOSE_FORMAT for pixel formats, sample formats,
// sample rates, color spaces and color ranges. Several invocation lines
// (including the pix_fmts/sample_fmts ones) are missing from this
// extraction, leaving only fragments below.
391 
394 
396  "%d", )
397 
398 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
400 
401 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
403 
// Append "channel_layouts=..." to the buffersink args string: either the
// single configured layout, or a '|'-separated list of encoder-supported
// layouts. Emits nothing at all when neither is known.
404 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
405 {
406  if (av_channel_layout_check(&ofp->ch_layout)) {
407  av_bprintf(bprint, "channel_layouts=");
408  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
409  } else if (ofp->ch_layouts) {
410  const AVChannelLayout *p;
411 
412  av_bprintf(bprint, "channel_layouts=");
413  for (p = ofp->ch_layouts; p->nb_channels; p++) {
// the per-layout describe call (original line 414) is missing from this view
415  av_bprintf(bprint, "|");
416  }
// drop the trailing '|'
417  if (bprint->len > 0)
418  bprint->str[--bprint->len] = '\0';
419  } else
420  return;
421  av_bprint_chars(bprint, ':', 1);
422 }
423 
424 static int read_binary(void *logctx, const char *path,
425  uint8_t **data, int *len)
426 {
427  AVIOContext *io = NULL;
428  int64_t fsize;
429  int ret;
430 
431  *data = NULL;
432  *len = 0;
433 
434  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
435  if (ret < 0) {
436  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
437  path, av_err2str(ret));
438  return ret;
439  }
440 
441  fsize = avio_size(io);
442  if (fsize < 0 || fsize > INT_MAX) {
443  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
444  ret = AVERROR(EIO);
445  goto fail;
446  }
447 
448  *data = av_malloc(fsize);
449  if (!*data) {
450  ret = AVERROR(ENOMEM);
451  goto fail;
452  }
453 
454  ret = avio_read(io, *data, fsize);
455  if (ret != fsize) {
456  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
457  ret = ret < 0 ? ret : AVERROR(EIO);
458  goto fail;
459  }
460 
461  *len = fsize;
462 
463  ret = 0;
464 fail:
465  avio_close(io);
466  if (ret < 0) {
467  av_freep(data);
468  *len = 0;
469  }
470  return ret;
471 }
472 
// Apply a single option to a filter context. A key starting with '/' means
// "load the option value from the file named by val": binary options get
// the raw bytes, everything else the file contents as a string.
// Returns 0 on success, a negative AVERROR otherwise.
473 static int filter_opt_apply(void *logctx, AVFilterContext *f,
474  const char *key, const char *val)
475 {
476  const AVOption *o = NULL;
477  int ret;
478 
// first attempt a direct set; the av_opt_set() call (original line 479)
// is missing from this extraction
480  if (ret >= 0)
481  return 0;
482 
// direct set failed; a leading '/' requests the value-from-file path --
// the av_opt_find() lookup of key+1 (line 484) is missing from this view
483  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
485  if (!o)
486  goto err_apply;
487 
488  // key is a valid option name prefixed with '/'
489  // interpret value as a path from which to load the actual option value
490  key++;
491 
492  if (o->type == AV_OPT_TYPE_BINARY) {
493  uint8_t *data;
494  int len;
495 
496  ret = read_binary(logctx, val, &data, &len);
497  if (ret < 0)
498  goto err_load;
499 
// the av_opt_set_bin() call (original line 500) is missing from this view
501  av_freep(&data);
502  } else {
503  char *data = file_read(val);
504  if (!data) {
505  ret = AVERROR(EIO);
506  goto err_load;
507  }
508 
// the av_opt_set() call with the file contents (line 509) is missing here
510  av_freep(&data);
511  }
512  if (ret < 0)
513  goto err_apply;
514 
515  return 0;
516 
517 err_apply:
518  av_log(logctx, AV_LOG_ERROR,
519  "Error applying option '%s' to filter '%s': %s\n",
520  key, f->filter->name, av_err2str(ret));
521  return ret;
522 err_load:
523  av_log(logctx, AV_LOG_ERROR,
524  "Error loading value for option '%s' from file '%s'\n",
525  key, val);
526  return ret;
527 }
528 
529 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
530 {
531  for (size_t i = 0; i < seg->nb_chains; i++) {
532  AVFilterChain *ch = seg->chains[i];
533 
534  for (size_t j = 0; j < ch->nb_filters; j++) {
535  AVFilterParams *p = ch->filters[j];
536  const AVDictionaryEntry *e = NULL;
537 
538  av_assert0(p->filter);
539 
540  while ((e = av_dict_iterate(p->opts, e))) {
541  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
542  if (ret < 0)
543  return ret;
544  }
545 
546  av_dict_free(&p->opts);
547  }
548  }
549 
550  return 0;
551 }
552 
// Parse a filtergraph description into `graph`, apply the per-filter
// options, hand each hw-capable filter a reference to the hw device, and
// return the lists of unconnected inputs/outputs. The inputs/outputs
// out-parameters (original line 555) and the AVFilterGraphSegment
// declaration (line 558) are missing from this extraction.
553 static int graph_parse(void *logctx,
554  AVFilterGraph *graph, const char *desc,
556  AVBufferRef *hw_device)
557 {
559  int ret;
560 
561  *inputs = NULL;
562  *outputs = NULL;
563 
564  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
565  if (ret < 0)
566  return ret;
567 
// the segment filter-creation step (original line 568) is missing here
569  if (ret < 0)
570  goto fail;
571 
572  if (hw_device) {
// give every hw-capable filter its own reference to the device context
573  for (int i = 0; i < graph->nb_filters; i++) {
574  AVFilterContext *f = graph->filters[i];
575 
576  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
577  continue;
578  f->hw_device_ctx = av_buffer_ref(hw_device);
579  if (!f->hw_device_ctx) {
580  ret = AVERROR(ENOMEM);
581  goto fail;
582  }
583  }
584  }
585 
586  ret = graph_opts_apply(logctx, seg);
587  if (ret < 0)
588  goto fail;
589 
// the final segment apply/link step (original line 590) is missing here
591 
592 fail:
// segment cleanup (original line 593, presumably
// avfilter_graph_segment_free) is missing from this view
594  return ret;
595 }
596 
597 // Filters can be configured only if the formats of all inputs are known.
// Returns 1 when every input's format has been determined, 0 otherwise.
// The signature (original line 598) and the InputFilterPriv lookup of
// fg->inputs[i] (line 601) are missing from this extraction.
599 {
600  for (int i = 0; i < fg->nb_inputs; i++) {
// format stays negative until the input's parameters are known
602  if (ifp->format < 0)
603  return 0;
604  }
605  return 1;
606 }
607 
608 static int filter_thread(void *arg);
609 
610 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
611 {
612  AVFilterContext *ctx = inout->filter_ctx;
613  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
614  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
615 
616  if (nb_pads > 1)
617  return av_strdup(ctx->filter->name);
618  return av_asprintf("%s:%s", ctx->filter->name,
619  avfilter_pad_get_name(pads, inout->pad_idx));
620 }
621 
622 static const char *ofilter_item_name(void *obj)
623 {
624  OutputFilterPriv *ofp = obj;
625  return ofp->log_name;
626 }
627 
// AVClass for OutputFilter(Priv): gives outputs their own log names and
// chains log output to the parent filtergraph through log_parent.
628 static const AVClass ofilter_class = {
629  .class_name = "OutputFilter",
630  .version = LIBAVUTIL_VERSION_INT,
631  .item_name = ofilter_item_name,
632  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
633  .category = AV_CLASS_CATEGORY_FILTER,
634 };
635 
// Append a new output to the filtergraph, initialized to "nothing decided
// yet" (format = -1). Returns the public OutputFilter, NULL on allocation
// failure. The signature (original line 636) and the colorspace/range
// defaults (lines 651-652) are missing from this extraction.
637 {
638  OutputFilterPriv *ofp;
639  OutputFilter *ofilter;
640 
641  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
642  if (!ofp)
643  return NULL;
644 
645  ofilter = &ofp->ofilter;
646  ofilter->class = &ofilter_class;
647  ofp->log_parent = fg;
648  ofilter->graph = fg;
649  ofilter->type = type;
// format is unknown until the graph is configured
650  ofp->format = -1;
653  ofilter->index = fg->nb_outputs - 1;
654 
// log name like "vo0"/"ao1": media-type initial + 'o' + output index
655  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
656  av_get_media_type_string(type)[0], ofilter->index);
657 
658  return ofilter;
659 }
660 
// Bind a filtergraph input to a demuxed input stream: validate media types,
// register the filter with the stream (ist_filter_add), connect it in the
// scheduler, and set up the sub2video canvas for subtitle inputs.
// The scheduler source-node declaration (original line 666) and part of the
// type-mismatch log arguments (line 675) are missing from this extraction.
661 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
662  const ViewSpecifier *vs)
663 {
664  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
665  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
667  int ret;
668 
// an input may only be bound once
669  av_assert0(!ifp->bound);
670  ifp->bound = 1;
671 
// subtitle->video is allowed for the sub2video hack; anything else must match
672  if (ifilter->type != ist->par->codec_type &&
673  !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
674  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
676  return AVERROR(EINVAL);
677  }
678 
679  ifp->type_src = ist->st->codecpar->codec_type;
680 
681  ifp->opts.fallback = av_frame_alloc();
682  if (!ifp->opts.fallback)
683  return AVERROR(ENOMEM);
684 
685  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
686  vs, &ifp->opts, &src);
687  if (ret < 0)
688  return ret;
689 
690  ifilter->input_name = av_strdup(ifp->opts.name);
691  if (!ifilter->input_name)
// NOTE(review): av_strdup failure is an allocation failure -- AVERROR(ENOMEM)
// would be the conventional code here; verify EINVAL is intentional
692  return AVERROR(EINVAL);
693 
694  ret = sch_connect(fgp->sch,
695  src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index));
696  if (ret < 0)
697  return ret;
698 
// subtitle input feeding a video filter: allocate the sub2video canvas
699  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
700  ifp->sub2video.frame = av_frame_alloc();
701  if (!ifp->sub2video.frame)
702  return AVERROR(ENOMEM);
703 
704  ifp->width = ifp->opts.sub2video_width;
705  ifp->height = ifp->opts.sub2video_height;
706 
707  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
708  palettes for all rectangles are identical or compatible */
709  ifp->format = AV_PIX_FMT_RGB32;
710 
711  ifp->time_base = AV_TIME_BASE_Q;
712 
713  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
714  ifp->width, ifp->height);
715  }
716 
717  return 0;
718 }
719 
721  const ViewSpecifier *vs)
722 {
725  int ret;
726 
727  av_assert0(!ifp->bound);
728  ifp->bound = 1;
729 
730  if (ifp->ifilter.type != dec->type) {
731  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
733  return AVERROR(EINVAL);
734  }
735 
736  ifp->type_src = ifp->ifilter.type;
737 
738  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
739  if (ret < 0)
740  return ret;
741 
742  ifp->ifilter.input_name = av_strdup(ifp->opts.name);
743  if (!ifp->ifilter.input_name)
744  return AVERROR(EINVAL);
745 
746  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
747  if (ret < 0)
748  return ret;
749 
750  return 0;
751 }
752 
753 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
754  const AVChannelLayout *layout_requested)
755 {
756  int i, err;
757 
758  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
759  /* Pass the layout through for all orders but UNSPEC */
760  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
761  if (err < 0)
762  return err;
763  return 0;
764  }
765 
766  /* Requested layout is of order UNSPEC */
767  if (!layouts_allowed) {
768  /* Use the default native layout for the requested amount of channels when the
769  encoder doesn't have a list of supported layouts */
770  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
771  return 0;
772  }
773  /* Encoder has a list of supported layouts. Pick the first layout in it with the
774  same amount of channels as the requested layout */
775  for (i = 0; layouts_allowed[i].nb_channels; i++) {
776  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
777  break;
778  }
779  if (layouts_allowed[i].nb_channels) {
780  /* Use it if one is found */
781  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
782  if (err < 0)
783  return err;
784  return 0;
785  }
786  /* If no layout for the amount of channels requested was found, use the default
787  native layout for it. */
788  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
789 
790  return 0;
791 }
792 
// Bind a filtergraph output to an encoder: copy the requested stream
// properties (or the encoder's supported lists when nothing is pinned),
// set up scaling/resampling options and fps conversion state, and connect
// the output to the encoder in the scheduler. Original line 857 (after the
// color_range branch) is missing from this extraction.
793 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
794  const OutputFilterOptions *opts)
795 {
796  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
797  FilterGraph *fg = ofilter->graph;
798  FilterGraphPriv *fgp = fgp_from_fg(fg);
799  int ret;
800 
// an output may only be bound once, and only to an encoder of its own type
801  av_assert0(!ofilter->bound);
802  av_assert0(!opts->enc ||
803  ofilter->type == opts->enc->type);
804 
805  ofilter->bound = 1;
806  av_freep(&ofilter->linklabel);
807 
808  ofp->flags = opts->flags;
809  ofp->ts_offset = opts->ts_offset;
810  ofp->enc_timebase = opts->output_tb;
811 
812  ofp->trim_start_us = opts->trim_start_us;
813  ofp->trim_duration_us = opts->trim_duration_us;
814 
815  ofilter->output_name = av_strdup(opts->name);
816  if (!ofilter->output_name)
// NOTE(review): allocation failure would conventionally be AVERROR(ENOMEM)
817  return AVERROR(EINVAL);
818 
819  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
820  if (ret < 0)
821  return ret;
822 
823  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
824  if (ret < 0)
825  return ret;
826 
827  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
828  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
829 
830  if (fgp->is_simple) {
831  // for simple filtergraph there is just one output,
832  // so use only graph-level information for logging
833  ofp->log_parent = NULL;
834  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
835  } else
836  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
837 
// for every property: take the explicit request if present, otherwise keep
// a pointer to the encoder's supported list for later negotiation
838  switch (ofilter->type) {
839  case AVMEDIA_TYPE_VIDEO:
840  ofp->width = opts->width;
841  ofp->height = opts->height;
842  if (opts->format != AV_PIX_FMT_NONE) {
843  ofp->format = opts->format;
844  } else
845  ofp->formats = opts->formats;
846 
847  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
848  ofp->color_space = opts->color_space;
849  else
850  ofp->color_spaces = opts->color_spaces;
851 
852  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
853  ofp->color_range = opts->color_range;
854  else
855  ofp->color_ranges = opts->color_ranges;
856 
858 
859  ofp->fps.last_frame = av_frame_alloc();
860  if (!ofp->fps.last_frame)
861  return AVERROR(ENOMEM);
862 
863  ofp->fps.vsync_method = opts->vsync_method;
864  ofp->fps.framerate = opts->frame_rate;
865  ofp->fps.framerate_max = opts->max_frame_rate;
866  ofp->fps.framerate_supported = opts->frame_rates;
867 
868  // reduce frame rate for mpeg4 to be within the spec limits
869  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
870  ofp->fps.framerate_clip = 65535;
871 
872  ofp->fps.dup_warning = 1000;
873 
874  break;
875  case AVMEDIA_TYPE_AUDIO:
876  if (opts->format != AV_SAMPLE_FMT_NONE) {
877  ofp->format = opts->format;
878  } else {
879  ofp->formats = opts->formats;
880  }
881  if (opts->sample_rate) {
882  ofp->sample_rate = opts->sample_rate;
883  } else
884  ofp->sample_rates = opts->sample_rates;
885  if (opts->ch_layout.nb_channels) {
886  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
887  if (ret < 0)
888  return ret;
889  } else {
890  ofp->ch_layouts = opts->ch_layouts;
891  }
892  break;
893  }
894 
895  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index),
896  SCH_ENC(sched_idx_enc));
897  if (ret < 0)
898  return ret;
899 
900  return 0;
901 }
902 
// Bind a filtergraph output to another filtergraph's input (graph-to-graph
// connection): just marks the output bound and records its name. The first
// signature line (original line 903) is missing from this extraction.
904  const OutputFilterOptions *opts)
905 {
906  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
907 
908  av_assert0(!ofilter->bound);
909  av_assert0(ofilter->type == ifp->ifilter.type);
910 
911  ofilter->bound = 1;
912  av_freep(&ofilter->linklabel);
913 
914  ofilter->output_name = av_strdup(opts->name);
915  if (!ofilter->output_name)
// NOTE(review): allocation failure would conventionally be AVERROR(ENOMEM)
916  return AVERROR(EINVAL);
917 
918  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
919 
920  return 0;
921 }
922 
// Bind this filtergraph input to the out_idx-th output of another
// filtergraph (linklabel match), wiring the two graphs together in the
// scheduler. The fgp declaration (original line 925), the
// OutputFilterOptions declaration (line 927) and part of the type-mismatch
// log call (line 937) are missing from this extraction.
923 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
924 {
926  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
928  char name[32];
929  int ret;
930 
// an input may only be bound once
931  av_assert0(!ifp->bound);
932  ifp->bound = 1;
933 
934  if (ifp->ifilter.type != ofilter_src->type) {
935  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
936  av_get_media_type_string(ofilter_src->type),
938  return AVERROR(EINVAL);
939  }
940 
941  ifp->type_src = ifp->ifilter.type;
942 
943  memset(&opts, 0, sizeof(opts));
944 
// synthesized name "fg:<graph idx>:<input idx>" for the connection
945  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index);
946  opts.name = name;
947 
948  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
949  if (ret < 0)
950  return ret;
951 
952  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
953  SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
954  if (ret < 0)
955  return ret;
956 
957  return 0;
958 }
959 
// Append a new input to the filtergraph with an unknown format (-1) and a
// fresh frame/queue. Returns the public InputFilter, NULL on allocation
// failure. The signature (original line 960), the colorspace/range defaults
// (lines 978-979) and the frame-queue allocation (line 981) are missing
// from this extraction.
961 {
962  InputFilterPriv *ifp;
963  InputFilter *ifilter;
964 
965  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
966  if (!ifp)
967  return NULL;
968 
969  ifilter = &ifp->ifilter;
970  ifilter->graph = fg;
971 
972  ifp->frame = av_frame_alloc();
973  if (!ifp->frame)
974  return NULL;
975 
976  ifilter->index = fg->nb_inputs - 1;
// format stays negative until the input's parameters are known
977  ifp->format = -1;
980 
982  if (!ifp->frame_queue)
983  return NULL;
984 
985  return ifilter;
986 }
987 
// Free a filtergraph and everything it owns: per-input queues/frames/names,
// per-output frames/options/names, the graph description and the private
// scratch frames. *pfg is set to NULL via av_freep(). Original lines 1012,
// 1015 and 1033-1034 (additional per-input/per-output frees) are missing
// from this extraction.
988 void fg_free(FilterGraph **pfg)
989 {
990  FilterGraph *fg = *pfg;
991  FilterGraphPriv *fgp;
992 
993  if (!fg)
994  return;
995  fgp = fgp_from_fg(fg);
996 
997  for (int j = 0; j < fg->nb_inputs; j++) {
998  InputFilter *ifilter = fg->inputs[j];
999  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1000 
// drain and free any frames still queued on this input
1001  if (ifp->frame_queue) {
1002  AVFrame *frame;
1003  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1004  av_frame_free(&frame);
1005  av_fifo_freep2(&ifp->frame_queue);
1006  }
1007  av_frame_free(&ifp->sub2video.frame);
1008 
1009  av_frame_free(&ifp->frame);
1010  av_frame_free(&ifp->opts.fallback);
1011 
1013  av_freep(&ifilter->linklabel);
1014  av_freep(&ifp->opts.name);
1016  av_freep(&ifilter->name);
1017  av_freep(&ifilter->input_name);
1018  av_freep(&fg->inputs[j]);
1019  }
1020  av_freep(&fg->inputs);
1021  for (int j = 0; j < fg->nb_outputs; j++) {
1022  OutputFilter *ofilter = fg->outputs[j];
1023  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1024 
1025  av_frame_free(&ofp->fps.last_frame);
1026  av_dict_free(&ofp->sws_opts);
1027  av_dict_free(&ofp->swr_opts);
1028 
1029  av_freep(&ofilter->linklabel);
1030  av_freep(&ofilter->name);
1031  av_freep(&ofilter->output_name);
1032  av_freep(&ofilter->apad);
1035  av_freep(&fg->outputs[j]);
1036  }
1037  av_freep(&fg->outputs);
1038  av_freep(&fg->graph_desc);
1039 
1040  av_frame_free(&fgp->frame);
1041  av_frame_free(&fgp->frame_enc);
1042 
1043  av_freep(pfg);
1044 }
1045 
1046 static const char *fg_item_name(void *obj)
1047 {
1048  const FilterGraphPriv *fgp = obj;
1049 
1050  return fgp->log_name;
1051 }
1052 
// AVClass for FilterGraph(Priv): provides the "fc#N" log name used by all
// graph-level av_log() calls.
1053 static const AVClass fg_class = {
1054  .class_name = "FilterGraph",
1055  .version = LIBAVUTIL_VERSION_INT,
1056  .item_name = fg_item_name,
1057  .category = AV_CLASS_CATEGORY_FILTER,
1058 };
1059 
// Create a filtergraph from a textual description. Takes ownership of
// graph_desc (freed on all early-failure paths). A throwaway AVFilterGraph
// is parsed only to enumerate the unconnected inputs/outputs, for which
// InputFilter/OutputFilter structs are allocated; the graph itself is
// freed before returning. The graph is also registered with the scheduler.
// Original lines 1065 (inputs/outputs declarations), 1080 (filtergraph
// registration call), 1092, 1111 (graph_parse trailing args) and 1186-1187
// (inout list frees) are missing from this extraction.
1060 int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
1061 {
1062  FilterGraphPriv *fgp;
1063  FilterGraph *fg;
1064 
1066  AVFilterGraph *graph;
1067  int ret = 0;
1068 
1069  fgp = av_mallocz(sizeof(*fgp));
1070  if (!fgp) {
// we own graph_desc, so it must be released on failure too
1071  av_freep(&graph_desc);
1072  return AVERROR(ENOMEM);
1073  }
1074  fg = &fgp->fg;
1075 
// with pfg the graph is caller-owned (index -1); otherwise it is appended
// to the global filtergraphs list (registration call missing from view)
1076  if (pfg) {
1077  *pfg = fg;
1078  fg->index = -1;
1079  } else {
1081  if (ret < 0) {
1082  av_freep(&graph_desc);
1083  av_freep(&fgp);
1084  return ret;
1085  }
1086 
1087  fg->index = nb_filtergraphs - 1;
1088  }
1089 
1090  fg->class = &fg_class;
1091  fg->graph_desc = graph_desc;
1093  fgp->nb_threads = -1;
1094  fgp->sch = sch;
1095 
1096  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1097 
1098  fgp->frame = av_frame_alloc();
1099  fgp->frame_enc = av_frame_alloc();
1100  if (!fgp->frame || !fgp->frame_enc)
1101  return AVERROR(ENOMEM);
1102 
1103  /* this graph is only used for determining the kinds of inputs
1104  * and outputs we have, and is discarded on exit from this function */
1105  graph = avfilter_graph_alloc();
1106  if (!graph)
// NOTE(review): stray double semicolon below (harmless empty statement)
1107  return AVERROR(ENOMEM);;
1108  graph->nb_threads = 1;
1109 
1110  ret = graph_parse(fg, graph, fg->graph_desc, &inputs, &outputs,
1112  if (ret < 0)
1113  goto fail;
1114 
// a graph with a pure source filter (no inputs) or apad cannot be driven
// purely by its inputs reaching EOF
1115  for (unsigned i = 0; i < graph->nb_filters; i++) {
1116  const AVFilter *f = graph->filters[i]->filter;
1117  if ((!avfilter_filter_pad_count(f, 0) &&
1118  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1119  !strcmp(f->name, "apad")) {
1120  fgp->have_sources = 1;
1121  break;
1122  }
1123  }
1124 
// create an InputFilter for every unconnected input pad
1125  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1126  InputFilter *const ifilter = ifilter_alloc(fg);
1127 
1128  if (!ifilter) {
1129  ret = AVERROR(ENOMEM);
1130  goto fail;
1131  }
1132 
// steal the link label from the inout entry
1133  ifilter->linklabel = cur->name;
1134  cur->name = NULL;
1135 
1136  ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1137  cur->pad_idx);
1138 
1139  if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) {
1140  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1141  "currently.\n");
1142  ret = AVERROR(ENOSYS);
1143  goto fail;
1144  }
1145 
1146  ifilter->name = describe_filter_link(fg, cur, 1);
1147  if (!ifilter->name) {
1148  ret = AVERROR(ENOMEM);
1149  goto fail;
1150  }
1151  }
1152 
// create an OutputFilter for every unconnected output pad
1153  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1154  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1155  cur->pad_idx);
1156  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1157 
1158  if (!ofilter) {
1159  ret = AVERROR(ENOMEM);
1160  goto fail;
1161  }
1162 
1163  ofilter->linklabel = cur->name;
1164  cur->name = NULL;
1165 
1166  ofilter->name = describe_filter_link(fg, cur, 0);
1167  if (!ofilter->name) {
1168  ret = AVERROR(ENOMEM);
1169  goto fail;
1170  }
1171  }
1172 
1173  if (!fg->nb_outputs) {
1174  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1175  ret = AVERROR(ENOSYS);
1176  goto fail;
1177  }
1178 
1179  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1180  filter_thread, fgp);
1181  if (ret < 0)
1182  goto fail;
1183  fgp->sch_idx = ret;
1184 
// success also flows through fail: (the throwaway graph and inout lists
// are freed on every path; the inout frees are missing from this view)
1185 fail:
1188  avfilter_graph_free(&graph);
1189 
1190  if (ret < 0)
1191  return ret;
1192 
1193  return 0;
1194 }
1195 
// Create a simple (1-in/1-out) filtergraph for one input stream and bind it
// to the stream on the input side and an encoder on the output side. The
// first signature line (original line 1196) and part of the type-mismatch
// log arguments (lines 1229-1230) are missing from this extraction.
1197  InputStream *ist,
1198  char *graph_desc,
1199  Scheduler *sch, unsigned sched_idx_enc,
1200  const OutputFilterOptions *opts)
1201 {
1202  const enum AVMediaType type = ist->par->codec_type;
1203  FilterGraph *fg;
1204  FilterGraphPriv *fgp;
1205  int ret;
1206 
1207  ret = fg_create(pfg, graph_desc, sch);
1208  if (ret < 0)
1209  return ret;
1210  fg = *pfg;
1211  fgp = fgp_from_fg(fg);
1212 
1213  fgp->is_simple = 1;
1214 
// log name like "vf..."/"af...": media-type initial + 'f' + output name
1215  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1216  av_get_media_type_string(type)[0], opts->name);
1217 
1218  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1219  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1220  "to have exactly 1 input and 1 output. "
1221  "However, it had %d input(s) and %d output(s). Please adjust, "
1222  "or use a complex filtergraph (-filter_complex) instead.\n",
1223  graph_desc, fg->nb_inputs, fg->nb_outputs);
1224  return AVERROR(EINVAL);
1225  }
1226  if (fg->outputs[0]->type != type) {
1227  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1228  "it to %s output stream\n",
1231  return AVERROR(EINVAL);
1232  }
1233 
1234  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1235  if (ret < 0)
1236  return ret;
1237 
1238  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1239  if (ret < 0)
1240  return ret;
1241 
// negative means "not specified"; keep the fg_create() default then
1242  if (opts->nb_threads >= 0)
1243  fgp->nb_threads = opts->nb_threads;
1244 
1245  return 0;
1246 }
1247 
/* Bind one input pad of a complex filtergraph to its data source. Three cases,
 * in order: a standalone decoder ("dec:N" link label), another filtergraph's
 * matching labelled output, or an explicitly specified demuxer stream
 * ("file:stream_spec" label); unlabelled pads fall back to the first unused
 * input stream of the matching media type.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): doxygen rendering — the signature line and a few declarations
 * (e.g. the view/stream specifier locals) are elided in this view. */
1249 {
1250  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1251  InputStream *ist = NULL;
1252  enum AVMediaType type = ifilter->type;
1254  const char *spec;
1255  char *p;
1256  int i, ret;
1257 
1258  if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) {
1259  // bind to a standalone decoder
1260  int dec_idx;
1261 
1262  dec_idx = strtol(ifilter->linklabel + 4, &p, 0);
1263  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1264  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1265  dec_idx, fg->graph_desc);
1266  return AVERROR(EINVAL);
1267  }
1268 
1269  if (type == AVMEDIA_TYPE_VIDEO) {
 // an optional view specifier may follow the decoder index after ':'
1270  spec = *p == ':' ? p + 1 : p;
1271  ret = view_specifier_parse(&spec, &vs);
1272  if (ret < 0)
1273  return ret;
1274  }
1275 
1276  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1277  if (ret < 0)
1278  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1279  ifilter->name);
1280  return ret;
1281  } else if (ifilter->linklabel) {
1283  AVFormatContext *s;
1284  AVStream *st = NULL;
1285  int file_idx;
1286 
1287  // try finding an unbound filtergraph output with this label
1288  for (int i = 0; i < nb_filtergraphs; i++) {
1289  FilterGraph *fg_src = filtergraphs[i];
1290 
1291  if (fg == fg_src)
1292  continue;
1293 
1294  for (int j = 0; j < fg_src->nb_outputs; j++) {
1295  OutputFilter *ofilter = fg_src->outputs[j];
1296 
1297  if (!ofilter->bound && ofilter->linklabel &&
1298  !strcmp(ofilter->linklabel, ifilter->linklabel)) {
1299  av_log(fg, AV_LOG_VERBOSE,
1300  "Binding input with label '%s' to filtergraph output %d:%d\n",
1301  ifilter->linklabel, i, j);
1302 
1303  ret = ifilter_bind_fg(ifp, fg_src, j);
1304  if (ret < 0)
1305  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1306  ifilter->linklabel);
 // a matching label always terminates the search, success or not
1307  return ret;
1308  }
1309  }
1310  }
1311 
1312  // bind to an explicitly specified demuxer stream
1313  file_idx = strtol(ifilter->linklabel, &p, 0);
1314  if (file_idx < 0 || file_idx >= nb_input_files) {
1315  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1316  file_idx, fg->graph_desc);
1317  return AVERROR(EINVAL);
1318  }
1319  s = input_files[file_idx]->ctx;
1320 
1321  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1322  if (ret < 0) {
1323  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1324  return ret;
1325  }
1326 
1327  if (type == AVMEDIA_TYPE_VIDEO) {
1328  spec = ss.remainder ? ss.remainder : "";
1329  ret = view_specifier_parse(&spec, &vs);
1330  if (ret < 0) {
1332  return ret;
1333  }
1334  }
1335 
 // pick the first stream in the file matching both the specifier and type
1336  for (i = 0; i < s->nb_streams; i++) {
1337  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1338  if (stream_type != type &&
1339  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1340  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1341  continue;
1342  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1343  st = s->streams[i];
1344  break;
1345  }
1346  }
1348  if (!st) {
1349  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1350  "matches no streams.\n", p, fg->graph_desc);
1351  return AVERROR(EINVAL);
1352  }
1353  ist = input_files[file_idx]->streams[st->index];
1354 
1355  av_log(fg, AV_LOG_VERBOSE,
1356  "Binding input with label '%s' to input stream %d:%d\n",
1357  ifilter->linklabel, ist->file->index, ist->index);
1358  } else {
 // unlabelled pad: grab any input stream of the right type not yet in use
1359  ist = ist_find_unused(type);
1360  if (!ist) {
1361  av_log(fg, AV_LOG_FATAL,
1362  "Cannot find an unused %s input stream to feed the "
1363  "unlabeled input pad %s.\n",
1364  av_get_media_type_string(type), ifilter->name);
1365  return AVERROR(EINVAL);
1366  }
1367 
1368  av_log(fg, AV_LOG_VERBOSE,
1369  "Binding unlabeled input %d to input stream %d:%d\n",
1370  ifilter->index, ist->file->index, ist->index);
1371  }
1372  av_assert0(ist);
1373 
1374  ret = ifilter_bind_ist(ifilter, ist, &vs);
1375  if (ret < 0) {
1376  av_log(fg, AV_LOG_ERROR,
1377  "Error binding an input stream to complex filtergraph input %s.\n",
1378  ifilter->name);
1379  return ret;
1380  }
1381 
1382  return 0;
1383 }
1384 
/* Bind every not-yet-bound input of the given filtergraph to its source
 * (input stream, decoder, or another filtergraph output) via
 * fg_complex_bind_input(). Returns 0 on success, negative AVERROR otherwise.
 * NOTE(review): the line declaring the per-iteration InputFilterPriv *ifp is
 * elided in this rendering. */
1385 static int bind_inputs(FilterGraph *fg)
1386 {
1387  // bind filtergraph inputs to input streams or other filtergraphs
1388  for (int i = 0; i < fg->nb_inputs; i++) {
1390  int ret;
1391 
1392  if (ifp->bound)
1393  continue;
1394 
1395  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1396  if (ret < 0)
1397  return ret;
1398  }
1399 
1400  return 0;
1401 }
1402 
/* Bind the inputs of all filtergraphs, then verify that every filtergraph
 * output was bound to an encoder or another graph; any unbound output is a
 * fatal configuration error.
 * NOTE(review): the signature line and the bind_inputs() call line are elided
 * in this rendering. */
1404 {
1405  int ret;
1406 
1407  for (int i = 0; i < nb_filtergraphs; i++) {
1409  if (ret < 0)
1410  return ret;
1411  }
1412 
1413  // check that all outputs were bound
1414  for (int i = 0; i < nb_filtergraphs; i++) {
1415  FilterGraph *fg = filtergraphs[i];
1416 
1417  for (int j = 0; j < fg->nb_outputs; j++) {
1418  OutputFilter *output = fg->outputs[j];
1419  if (!output->bound) {
1420  av_log(fg, AV_LOG_FATAL,
1421  "Filter '%s' has output %d (%s) unconnected\n",
1422  output->name, j,
1423  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1424  return AVERROR(EINVAL);
1425  }
1426  }
1427  }
1428 
1429  return 0;
1430 }
1431 
/* Append a (a)trim filter after *last_filter to enforce -ss/-t style limits.
 * No-op when neither start_time nor duration is set. On success updates
 * *last_filter/*pad_idx to point at the new trim filter's output.
 * "durationi"/"starti" are the integer (microsecond) variants of the trim
 * filter's options.
 * NOTE(review): the ctx declaration, the option-set flag arguments and the
 * filter-init call are elided in this rendering. */
1432 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1433  AVFilterContext **last_filter, int *pad_idx,
1434  const char *filter_name)
1435 {
1436  AVFilterGraph *graph = (*last_filter)->graph;
1438  const AVFilter *trim;
1439  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1440  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1441  int ret = 0;
1442 
1443  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1444  return 0;
1445 
1446  trim = avfilter_get_by_name(name);
1447  if (!trim) {
1448  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1449  "recording time.\n", name);
1450  return AVERROR_FILTER_NOT_FOUND;
1451  }
1452 
1453  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1454  if (!ctx)
1455  return AVERROR(ENOMEM);
1456 
1457  if (duration != INT64_MAX) {
1458  ret = av_opt_set_int(ctx, "durationi", duration,
1460  }
1461  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1462  ret = av_opt_set_int(ctx, "starti", start_time,
1464  }
1465  if (ret < 0) {
1466  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1467  return ret;
1468  }
1469 
1471  if (ret < 0)
1472  return ret;
1473 
1474  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1475  if (ret < 0)
1476  return ret;
1477 
 // caller continues appending filters after the trim
1478  *last_filter = ctx;
1479  *pad_idx = 0;
1480  return 0;
1481 }
1482 
/* Create a filter by name with the given args, link it after *last_filter,
 * and advance *last_filter/*pad_idx to its output. Returns AVERROR_BUG if the
 * named filter does not exist (callers only pass built-in filter names).
 * NOTE(review): the ctx declaration and the create-filter call are elided in
 * this rendering. */
1483 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1484  const char *filter_name, const char *args)
1485 {
1486  AVFilterGraph *graph = (*last_filter)->graph;
1487  const AVFilter *filter = avfilter_get_by_name(filter_name);
1489  int ret;
1490 
1491  if (!filter)
1492  return AVERROR_BUG;
1493 
1495  filter,
1496  filter_name, args, NULL, graph);
1497  if (ret < 0)
1498  return ret;
1499 
1500  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1501  if (ret < 0)
1502  return ret;
1503 
1504  *last_filter = ctx;
1505  *pad_idx = 0;
1506  return 0;
1507 }
1508 
/* Terminate a video output chain: create the buffersink, optionally insert a
 * scaler (when fixed output dimensions are requested with autoscale), a
 * format-constraining filter, and an output trim, then link everything to the
 * sink. Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): the signature line and several lines (sink creation result,
 * scaler creation, bprint init) are elided in this rendering. */
1510  OutputFilter *ofilter, AVFilterInOut *out)
1511 {
1512  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1513  AVFilterContext *last_filter = out->filter_ctx;
1514  AVBPrint bprint;
1515  int pad_idx = out->pad_idx;
1516  int ret;
1517  char name[255];
1518 
1519  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1521  avfilter_get_by_name("buffersink"),
1522  name, NULL, NULL, graph);
1523 
1524  if (ret < 0)
1525  return ret;
1526 
 // forced output size: scale before the sink, carrying over sws options
1527  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1528  char args[255];
1530  const AVDictionaryEntry *e = NULL;
1531 
1532  snprintf(args, sizeof(args), "%d:%d",
1533  ofp->width, ofp->height);
1534 
1535  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1536  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1537  }
1538 
1539  snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name);
1541  name, args, NULL, graph)) < 0)
1542  return ret;
1543  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1544  return ret;
1545 
1546  last_filter = filter;
1547  pad_idx = 0;
1548  }
1549 
 // constrain pixel formats / color spaces / color ranges via a format filter
1551  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1553  choose_pix_fmts(ofp, &bprint);
1554  choose_color_spaces(ofp, &bprint);
1555  choose_color_ranges(ofp, &bprint);
1556  if (!av_bprint_is_complete(&bprint))
1557  return AVERROR(ENOMEM);
1558 
1559  if (bprint.len) {
1561 
1563  avfilter_get_by_name("format"),
1564  "format", bprint.str, NULL, graph);
1565  av_bprint_finalize(&bprint, NULL);
1566  if (ret < 0)
1567  return ret;
1568  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1569  return ret;
1570 
1571  last_filter = filter;
1572  pad_idx = 0;
1573  }
1574 
1575  snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name);
1576  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1577  &last_filter, &pad_idx, name);
1578  if (ret < 0)
1579  return ret;
1580 
1581 
1582  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1583  return ret;
1584 
1585  return 0;
1586 }
1587 
/* Terminate an audio output chain: create the abuffersink, constrain sample
 * formats/rates/channel layouts via aformat, optionally auto-insert apad, add
 * the output trim, and link to the sink. Uses goto-based cleanup to finalize
 * the AVBPrint on every path.
 * NOTE(review): the signature line and the bprint init are elided in this
 * rendering. */
1589  OutputFilter *ofilter, AVFilterInOut *out)
1590 {
1591  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1592  AVFilterContext *last_filter = out->filter_ctx;
1593  int pad_idx = out->pad_idx;
1594  AVBPrint args;
1595  char name[255];
1596  int ret;
1597 
1598  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1600  avfilter_get_by_name("abuffersink"),
1601  name, NULL, NULL, graph);
1602  if (ret < 0)
1603  return ret;
1604 
 /* helper: create a filter mirroring an -af option, link it after
  * last_filter, and advance the chain; jumps to fail on error */
1605 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1606  AVFilterContext *filt_ctx; \
1607  \
1608  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1609  "similarly to -af " filter_name "=%s.\n", arg); \
1610  \
1611  ret = avfilter_graph_create_filter(&filt_ctx, \
1612  avfilter_get_by_name(filter_name), \
1613  filter_name, arg, NULL, graph); \
1614  if (ret < 0) \
1615  goto fail; \
1616  \
1617  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1618  if (ret < 0) \
1619  goto fail; \
1620  \
1621  last_filter = filt_ctx; \
1622  pad_idx = 0; \
1623 } while (0)
1625 
1626  choose_sample_fmts(ofp, &args);
1627  choose_sample_rates(ofp, &args);
1628  choose_channel_layouts(ofp, &args);
1629  if (!av_bprint_is_complete(&args)) {
1630  ret = AVERROR(ENOMEM);
1631  goto fail;
1632  }
1633  if (args.len) {
1635 
1636  snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name);
1638  avfilter_get_by_name("aformat"),
1639  name, args.str, NULL, graph);
1640  if (ret < 0)
1641  goto fail;
1642 
1643  ret = avfilter_link(last_filter, pad_idx, format, 0);
1644  if (ret < 0)
1645  goto fail;
1646 
1647  last_filter = format;
1648  pad_idx = 0;
1649  }
1650 
1651  if (ofilter->apad) {
1652  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
 // apad generates data on its own, so the graph now has a source
1653  fgp->have_sources = 1;
1654  }
1655 
1656  snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name);
1657  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1658  &last_filter, &pad_idx, name);
1659  if (ret < 0)
1660  goto fail;
1661 
1662  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1663  goto fail;
1664 fail:
1665  av_bprint_finalize(&args, NULL);
1666 
1667  return ret;
1668 }
1669 
/* Dispatch output configuration by media type; only video and audio outputs
 * are valid here (asserted).
 * NOTE(review): the signature line is elided in this rendering. */
1671  OutputFilter *ofilter, AVFilterInOut *out)
1672 {
1673  switch (ofilter->type) {
1674  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1675  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1676  default: av_assert0(0); return 0;
1677  }
1678 }
1679 
/* Reset the sub2video state of an input filter: invalidate the last/end pts
 * and flag the structure for (re-)initialization on the next heartbeat.
 * NOTE(review): the signature line is elided in this rendering. */
1681 {
1682  ifp->sub2video.last_pts = INT64_MIN;
1683  ifp->sub2video.end_pts = INT64_MIN;
1684 
1685  /* sub2video structure has been (re-)initialized.
1686  Mark it as such so that the system will be
1687  initialized with the first received heartbeat. */
1688  ifp->sub2video.initialize = 1;
1689 }
1690 
/* Create and configure the buffersrc for a video input: set buffer source
 * parameters from the probed input properties, then insert optional crop,
 * auto-rotation (from the stream display matrix) and input trim filters
 * before linking into the parsed graph.
 * NOTE(review): the signature line and the par allocation/desc lookup lines
 * are elided in this rendering. */
1692  InputFilter *ifilter, AVFilterInOut *in)
1693 {
1694  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1695 
1696  AVFilterContext *last_filter;
1697  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1698  const AVPixFmtDescriptor *desc;
1699  char name[255];
1700  int ret, pad_idx = 0;
1702  if (!par)
1703  return AVERROR(ENOMEM);
1704 
1705  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1706  sub2video_prepare(ifp);
1707 
1708  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1709  ifp->opts.name);
1710 
1711  ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1712  if (!ifilter->filter) {
1713  ret = AVERROR(ENOMEM);
1714  goto fail;
1715  }
1716 
 // describe the incoming frames to the buffer source
1717  par->format = ifp->format;
1718  par->time_base = ifp->time_base;
1719  par->frame_rate = ifp->opts.framerate;
1720  par->width = ifp->width;
1721  par->height = ifp->height;
1722  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1723  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1724  par->color_space = ifp->color_space;
1725  par->color_range = ifp->color_range;
1726  par->hw_frames_ctx = ifp->hw_frames_ctx;
1727  par->side_data = ifp->side_data;
1728  par->nb_side_data = ifp->nb_side_data;
1729 
1730  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1731  if (ret < 0)
1732  goto fail;
1733  av_freep(&par);
1734 
1735  ret = avfilter_init_dict(ifilter->filter, NULL);
1736  if (ret < 0)
1737  goto fail;
1738 
1739  last_filter = ifilter->filter;
1740 
1742  av_assert0(desc);
1743 
1744  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1745  char crop_buf[64];
1746  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1747  ifp->opts.crop_left, ifp->opts.crop_right,
1748  ifp->opts.crop_top, ifp->opts.crop_bottom,
1749  ifp->opts.crop_left, ifp->opts.crop_top);
1750  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1751  if (ret < 0)
1752  return ret;
1753  }
1754 
1755  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
 // auto-rotate per the display matrix; hwaccel formats are skipped because
 // the software transpose/flip filters cannot touch their frames
1756  ifp->displaymatrix_applied = 0;
1757  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1758  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1759  int32_t *displaymatrix = ifp->displaymatrix;
1760  double theta;
1761 
1762  theta = get_rotation(displaymatrix);
1763 
1764  if (fabs(theta - 90) < 1.0) {
1765  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1766  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1767  } else if (fabs(theta - 180) < 1.0) {
1768  if (displaymatrix[0] < 0) {
1769  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1770  if (ret < 0)
1771  return ret;
1772  }
1773  if (displaymatrix[4] < 0) {
1774  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1775  }
1776  } else if (fabs(theta - 270) < 1.0) {
1777  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1778  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1779  } else if (fabs(theta) > 1.0) {
1780  char rotate_buf[64];
1781  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1782  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1783  } else if (fabs(theta) < 1.0) {
1784  if (displaymatrix && displaymatrix[4] < 0) {
1785  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1786  }
1787  }
1788  if (ret < 0)
1789  return ret;
1790 
1791  ifp->displaymatrix_applied = 1;
1792  }
1793 
1794  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1795  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1796  &last_filter, &pad_idx, name);
1797  if (ret < 0)
1798  return ret;
1799 
1800  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1801  return ret;
1802  return 0;
1803 fail:
1804  av_freep(&par);
1805 
1806  return ret;
1807 }
1808 
/* Create and configure the abuffer source for an audio input: build its
 * option string from the probed time base/sample rate/format/layout, attach
 * global side data via buffer source parameters, insert the input trim, and
 * link into the parsed graph.
 * NOTE(review): the signature line, the bprint init, the sample-format name
 * lookup and the par allocation are elided in this rendering. */
1810  InputFilter *ifilter, AVFilterInOut *in)
1811 {
1812  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1813  AVFilterContext *last_filter;
1814  AVBufferSrcParameters *par;
1815  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1816  AVBPrint args;
1817  char name[255];
1818  int ret, pad_idx = 0;
1819 
1821  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1822  ifp->time_base.num, ifp->time_base.den,
1823  ifp->sample_rate,
1825  if (av_channel_layout_check(&ifp->ch_layout) &&
1827  av_bprintf(&args, ":channel_layout=");
1829  } else
1830  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1831  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1832 
1833  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
1834  name, args.str, NULL,
1835  graph)) < 0)
1836  return ret;
 // side data cannot be passed in the option string; use parameters instead
1838  if (!par)
1839  return AVERROR(ENOMEM);
1840  par->side_data = ifp->side_data;
1841  par->nb_side_data = ifp->nb_side_data;
1842  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1843  av_free(par);
1844  if (ret < 0)
1845  return ret;
1846  last_filter = ifilter->filter;
1847 
1848  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1849  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1850  &last_filter, &pad_idx, name);
1851  if (ret < 0)
1852  return ret;
1853 
1854  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1855  return ret;
1856 
1857  return 0;
1858 }
1859 
/* Dispatch input configuration by media type; only video and audio inputs
 * are valid here (asserted).
 * NOTE(review): the signature line is elided in this rendering. */
1861  InputFilter *ifilter, AVFilterInOut *in)
1862 {
1863  switch (ifilter->type) {
1864  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1865  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1866  default: av_assert0(0); return 0;
1867  }
1868 }
1869 
/* Tear down the configured graph: clear the per-input/output AVFilterContext
 * pointers (they are owned by the graph) and free the graph itself.
 * NOTE(review): the signature line is elided in this rendering. */
1871 {
1872  for (int i = 0; i < fg->nb_outputs; i++)
1873  fg->outputs[i]->filter = NULL;
1874  for (int i = 0; i < fg->nb_inputs; i++)
1875  fg->inputs[i]->filter = NULL;
1876  avfilter_graph_free(&fgt->graph);
1877 }
1878 
/* Return 1 if the filter is a (a)buffer source fed by us: no inputs and named
 * "buffer" or "abuffer".
 * NOTE(review): the signature line is elided in this rendering. */
1880 {
1881  return f->nb_inputs == 0 &&
1882  (!strcmp(f->filter->name, "buffer") ||
1883  !strcmp(f->filter->name, "abuffer"));
1884 }
1885 
/* Return 1 if every filter in the graph is metadata-only (or a sink/our own
 * buffer source), i.e. the graph never modifies frame data.
 * NOTE(review): the filter_is_buffersrc(f) term of the condition is elided in
 * this rendering. */
1886 static int graph_is_meta(AVFilterGraph *graph)
1887 {
1888  for (unsigned i = 0; i < graph->nb_filters; i++) {
1889  const AVFilterContext *f = graph->filters[i];
1890 
1891  /* in addition to filters flagged as meta, also
1892  * disregard sinks and buffersources (but not other sources,
1893  * since they introduce data we are not aware of)
1894  */
1895  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1896  f->nb_outputs == 0 ||
1898  return 0;
1899  }
1900  return 1;
1901 }
1902 
1903 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1904 
/* (Re)configure a filtergraph: allocate a fresh AVFilterGraph, apply
 * threading/sws/swr options (simple graphs only), parse the description,
 * configure all input and output filters, then snapshot the negotiated output
 * parameters and flush any queued frames / pending EOFs into the new graph.
 * Returns 0 on success; on failure the partially built graph is torn down.
 * NOTE(review): the signature line and a number of body lines (error paths,
 * buffersink property reads, avfilter_graph_request_oldest call) are elided
 * in this rendering. */
1906 {
1907  FilterGraphPriv *fgp = fgp_from_fg(fg);
1908  AVBufferRef *hw_device;
1909  AVFilterInOut *inputs, *outputs, *cur;
1910  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
1911  int have_input_eof = 0;
1912  const char *graph_desc = fg->graph_desc;
1913 
1914  cleanup_filtergraph(fg, fgt);
1915  fgt->graph = avfilter_graph_alloc();
1916  if (!fgt->graph)
1917  return AVERROR(ENOMEM);
1918 
 // per-output options (threads, sws/swr opts) only apply to simple graphs
1919  if (simple) {
1920  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1921 
1922  if (filter_nbthreads) {
1923  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1924  if (ret < 0)
1925  goto fail;
1926  } else if (fgp->nb_threads >= 0) {
1927  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
1928  if (ret < 0)
1929  return ret;
1930  }
1931 
1932  if (av_dict_count(ofp->sws_opts)) {
1934  &fgt->graph->scale_sws_opts,
1935  '=', ':');
1936  if (ret < 0)
1937  goto fail;
1938  }
1939 
1940  if (av_dict_count(ofp->swr_opts)) {
1941  char *args;
1942  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1943  if (ret < 0)
1944  goto fail;
1945  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1946  av_free(args);
1947  }
1948  } else {
1950  }
1951 
1952  if (filter_buffered_frames) {
1953  ret = av_opt_set_int(fgt->graph, "max_buffered_frames", filter_buffered_frames, 0);
1954  if (ret < 0)
1955  return ret;
1956  }
1957 
1958  hw_device = hw_device_for_filter();
1959 
1960  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
1961  if (ret < 0)
1962  goto fail;
1963 
1964  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1965  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1968  goto fail;
1969  }
1971 
1972  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1973  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1974  if (ret < 0) {
1976  goto fail;
1977  }
1978  }
1980 
1981  if (fgp->disable_conversions)
1983  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1984  goto fail;
1985 
1986  fgp->is_meta = graph_is_meta(fgt->graph);
1987 
1988  /* limit the lists of allowed formats to the ones selected, to
1989  * make sure they stay the same if the filtergraph is reconfigured later */
1990  for (int i = 0; i < fg->nb_outputs; i++) {
1991  const AVFrameSideData *const *sd;
1992  int nb_sd;
1993  OutputFilter *ofilter = fg->outputs[i];
1994  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1995  AVFilterContext *sink = ofilter->filter;
1996 
1997  ofp->format = av_buffersink_get_format(sink);
1998 
1999  ofp->width = av_buffersink_get_w(sink);
2000  ofp->height = av_buffersink_get_h(sink);
2003 
2004  // If the timing parameters are not locked yet, get the tentative values
2005  // here but don't lock them. They will only be used if no output frames
2006  // are ever produced.
2007  if (!ofp->tb_out_locked) {
2009  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2010  fr.num > 0 && fr.den > 0)
2011  ofp->fps.framerate = fr;
2012  ofp->tb_out = av_buffersink_get_time_base(sink);
2013  }
2015 
2018  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2019  if (ret < 0)
2020  goto fail;
2022  sd = av_buffersink_get_side_data(sink, &nb_sd);
2023  if (nb_sd)
2024  for (int j = 0; j < nb_sd; j++) {
2026  sd[j], 0);
2027  if (ret < 0) {
2029  goto fail;
2030  }
2031  }
2032  }
2033 
 // replay frames that were queued while the graph was not configured
2034  for (int i = 0; i < fg->nb_inputs; i++) {
2035  InputFilter *ifilter = fg->inputs[i];
2037  AVFrame *tmp;
2038  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2039  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2040  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2041  } else {
2042  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2043  if (ifp->displaymatrix_applied)
2045  }
2046  ret = av_buffersrc_add_frame(ifilter->filter, tmp);
2047  }
2048  av_frame_free(&tmp);
2049  if (ret < 0)
2050  goto fail;
2051  }
2052  }
2053 
2054  /* send the EOFs for the finished inputs */
2055  for (int i = 0; i < fg->nb_inputs; i++) {
2056  InputFilter *ifilter = fg->inputs[i];
2057  if (fgt->eof_in[i]) {
2058  ret = av_buffersrc_add_frame(ifilter->filter, NULL);
2059  if (ret < 0)
2060  goto fail;
2061  have_input_eof = 1;
2062  }
2063  }
2064 
2065  if (have_input_eof) {
2066  // make sure the EOF propagates to the end of the graph
2068  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2069  goto fail;
2070  }
2071 
2072  return 0;
2073 fail:
2074  cleanup_filtergraph(fg, fgt);
2075  return ret;
2076 }
2077 
/* Capture the input filter's negotiation parameters from an incoming frame:
 * hw frames context, time base, pixel/sample properties, channel layout,
 * global side data, display matrix and downmix info. Called so a later
 * (re)configuration can describe the stream to the buffer source.
 * NOTE(review): the signature line and the side-data free/add calls are
 * elided in this rendering. */
2079 {
2080  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2081  AVFrameSideData *sd;
2082  int ret;
2083 
2084  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2085  if (ret < 0)
2086  return ret;
2087 
 // audio: 1/sample_rate; CFR video: inverse framerate; otherwise frame tb
2088  ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2089  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2090  frame->time_base;
2091 
2092  ifp->format = frame->format;
2093 
2094  ifp->width = frame->width;
2095  ifp->height = frame->height;
2096  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2097  ifp->color_space = frame->colorspace;
2098  ifp->color_range = frame->color_range;
2099 
2100  ifp->sample_rate = frame->sample_rate;
2101  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2102  if (ret < 0)
2103  return ret;
2104 
 // keep only side data marked as global (stream-level) properties
2106  for (int i = 0; i < frame->nb_side_data; i++) {
2107  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2108 
2109  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL))
2110  continue;
2111 
2113  &ifp->nb_side_data,
2114  frame->side_data[i], 0);
2115  if (ret < 0)
2116  return ret;
2117  }
2118 
2120  if (sd)
2121  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2122  ifp->displaymatrix_present = !!sd;
2123 
2124  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2125  * to the filter chain even though it's not "global", as filters like aresample
2126  * require this information during init and not when remixing a frame */
2128  if (sd) {
2130  &ifp->nb_side_data, sd, 0);
2131  if (ret < 0)
2132  return ret;
2133  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2134  }
2135  ifp->downmixinfo_present = !!sd;
2136 
2137  return 0;
2138 }
2139 
/* Return non-zero if this is a "simple" filtergraph (single input/output,
 * implicitly created for one stream), as recorded at creation time.
 * NOTE(review): the signature line is elided in this rendering. */
2141 {
2142  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2143  return fgp->is_simple;
2144 }
2145 
2146 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2147  double time, const char *target,
2148  const char *command, const char *arg, int all_filters)
2149 {
2150  int ret;
2151 
2152  if (!graph)
2153  return;
2154 
2155  if (time < 0) {
2156  char response[4096];
2157  ret = avfilter_graph_send_command(graph, target, command, arg,
2158  response, sizeof(response),
2159  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2160  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2161  fg->index, ret, response);
2162  } else if (!all_filters) {
2163  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2164  } else {
2165  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2166  if (ret < 0)
2167  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2168  }
2169 }
2170 
2171 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2172 {
2173  int nb_requests, nb_requests_max = -1;
2174  int best_input = -1;
2175 
2176  for (int i = 0; i < fg->nb_inputs; i++) {
2177  InputFilter *ifilter = fg->inputs[i];
2178 
2179  if (fgt->eof_in[i])
2180  continue;
2181 
2182  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2183  if (nb_requests > nb_requests_max) {
2184  nb_requests_max = nb_requests;
2185  best_input = i;
2186  }
2187  }
2188 
2189  av_assert0(best_input >= 0);
2190 
2191  return best_input;
2192 }
2193 
/* Decide and lock the output time base (and, for video, the frame rate) for
 * an output filter, honoring -enc_time_base, the configured/maximum
 * framerates, and encoder-supported framerate lists. Audio uses
 * 1/sample_rate when no time base was forced.
 * NOTE(review): the signature line is elided in this rendering. */
2195 {
2196  OutputFilter *ofilter = &ofp->ofilter;
2197  FPSConvContext *fps = &ofp->fps;
2198  AVRational tb = (AVRational){ 0, 0 };
2199  AVRational fr;
2200  const FrameData *fd;
2201 
2202  fd = frame_data_c(frame);
2203 
2204  // apply -enc_time_base
2205  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2206  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2207  av_log(ofp, AV_LOG_ERROR,
2208  "Demuxing timebase not available - cannot use it for encoding\n");
2209  return AVERROR(EINVAL);
2210  }
2211 
2212  switch (ofp->enc_timebase.num) {
2213  case 0: break;
2214  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2215  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2216  default: tb = ofp->enc_timebase; break;
2217  }
2218 
2219  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2220  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2221  goto finish;
2222  }
2223 
 // video: determine the output frame rate, preferring the user-set one
2224  fr = fps->framerate;
2225  if (!fr.num) {
2226  AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter);
2227  if (fr_sink.num > 0 && fr_sink.den > 0)
2228  fr = fr_sink;
2229  }
2230 
2231  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2232  if (!fr.num && !fps->framerate_max.num) {
2233  fr = (AVRational){25, 1};
2234  av_log(ofp, AV_LOG_WARNING,
2235  "No information "
2236  "about the input framerate is available. Falling "
2237  "back to a default value of 25fps. Use the -r option "
2238  "if you want a different framerate.\n");
2239  }
2240 
2241  if (fps->framerate_max.num &&
2242  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2243  !fr.den))
2244  fr = fps->framerate_max;
2245  }
2246 
2247  if (fr.num > 0) {
 // snap to the nearest encoder-supported rate, then clip precision
2248  if (fps->framerate_supported) {
2249  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2250  fr = fps->framerate_supported[idx];
2251  }
2252  if (fps->framerate_clip) {
2253  av_reduce(&fr.num, &fr.den,
2254  fr.num, fr.den, fps->framerate_clip);
2255  }
2256  }
2257 
2258  if (!(tb.num > 0 && tb.den > 0))
2259  tb = av_inv_q(fr);
2260  if (!(tb.num > 0 && tb.den > 0))
2261  tb = frame->time_base;
2262 
2263  fps->framerate = fr;
2264 finish:
2265  ofp->tb_out = tb;
2266  ofp->tb_out_locked = 1;
2267 
2268  return 0;
2269 }
2270 
/* Rescale a frame's pts from the filter time base to the encoder time base
 * tb_dst, subtracting start_time, and return the pts as a double with extra
 * fractional precision for the fps conversion code. Returns AV_NOPTS_VALUE
 * when the frame has no pts.
 * NOTE(review): the start_time subtraction lines are elided in this
 * rendering. */
2271 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2272  AVRational tb_dst, int64_t start_time)
2273 {
2274  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2275 
2276  AVRational tb = tb_dst;
2277  AVRational filter_tb = frame->time_base;
 // widen the denominator to keep sub-tick precision in float_pts
2278  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2279 
2280  if (frame->pts == AV_NOPTS_VALUE)
2281  goto early_exit;
2282 
2283  tb.den <<= extra_bits;
2284  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2286  float_pts /= 1 << extra_bits;
2287  // when float_pts is not exactly an integer,
2288  // avoid exact midpoints to reduce the chance of rounding differences, this
2289  // can be removed in case the fps code is changed to work with integers
2290  if (float_pts != llrint(float_pts))
2291  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2292 
2293  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2295  frame->time_base = tb_dst;
2296 
2297 early_exit:
2298 
2299  if (debug_ts) {
2300  av_log(logctx, AV_LOG_INFO,
2301  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2302  frame ? av_ts2str(frame->pts) : "NULL",
2303  av_ts2timestr(frame->pts, &tb_dst),
2304  float_pts, tb_dst.num, tb_dst.den);
2305  }
2306 
2307  return float_pts;
2308 }
2309 
2310 /* Convert frame timestamps to the encoder timebase and decide how many times
2311  * should this (and possibly previous) frame be repeated in order to conform to
2312  * desired target framerate (if any).
2313  */
/* Video sync: convert the frame's timestamps to the encoder time base and
 * compute how many times this frame (*nb_frames) and the previous frame
 * (*nb_frames_prev) must be emitted to satisfy the configured vsync method
 * (CFR/VSCFR duplicate or drop; VFR/passthrough keep timestamps). A NULL
 * frame flushes using the median of the recent duplication history.
 * NOTE(review): the first signature line and the CFR-condition lines of the
 * drift-clipping test are elided in this rendering. */
2315  int64_t *nb_frames, int64_t *nb_frames_prev)
2316 {
2317  OutputFilter *ofilter = &ofp->ofilter;
2318  FPSConvContext *fps = &ofp->fps;
2319  double delta0, delta, sync_ipts, duration;
2320 
2321  if (!frame) {
 // EOF flush: predict duplication from the last three frames' history
2322  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2323  fps->frames_prev_hist[1],
2324  fps->frames_prev_hist[2]);
2325 
2326  if (!*nb_frames && fps->last_dropped) {
2327  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2328  fps->last_dropped++;
2329  }
2330 
2331  goto finish;
2332  }
2333 
2334  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2335 
2336  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2337  ofp->tb_out, ofp->ts_offset);
2338  /* delta0 is the "drift" between the input frame and
2339  * where it would fall in the output. */
2340  delta0 = sync_ipts - ofp->next_pts;
2341  delta = delta0 + duration;
2342 
2343  // tracks the number of times the PREVIOUS frame should be duplicated,
2344  // mostly for variable framerate (VFR)
2345  *nb_frames_prev = 0;
2346  /* by default, we output a single frame */
2347  *nb_frames = 1;
2348 
 // small negative drift with positive end: clip the frame into place
2349  if (delta0 < 0 &&
2350  delta > 0 &&
2353  && fps->vsync_method != VSYNC_DROP
2354 #endif
2355  ) {
2356  if (delta0 < -0.6) {
2357  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2358  } else
2359  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2360  sync_ipts = ofp->next_pts;
2361  duration += delta0;
2362  delta0 = 0;
2363  }
2364 
2365  switch (fps->vsync_method) {
2366  case VSYNC_VSCFR:
2367  if (fps->frame_number == 0 && delta0 >= 0.5) {
2368  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2369  delta = duration;
2370  delta0 = 0;
2371  ofp->next_pts = llrint(sync_ipts);
2372  }
 /* fallthrough to CFR handling */
2373  case VSYNC_CFR:
2374  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2375  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2376  *nb_frames = 0;
2377  } else if (delta < -1.1)
2378  *nb_frames = 0;
2379  else if (delta > 1.1) {
2380  *nb_frames = llrintf(delta);
2381  if (delta0 > 1.1)
2382  *nb_frames_prev = llrintf(delta0 - 0.6);
2383  }
2384  frame->duration = 1;
2385  break;
2386  case VSYNC_VFR:
2387  if (delta <= -0.6)
2388  *nb_frames = 0;
2389  else if (delta > 0.6)
2390  ofp->next_pts = llrint(sync_ipts);
2391  frame->duration = llrint(duration);
2392  break;
2393 #if FFMPEG_OPT_VSYNC_DROP
2394  case VSYNC_DROP:
2395 #endif
2396  case VSYNC_PASSTHROUGH:
2397  ofp->next_pts = llrint(sync_ipts);
2398  frame->duration = llrint(duration);
2399  break;
2400  default:
2401  av_assert0(0);
2402  }
2403 
2404 finish:
 // shift the duplication history and account for drops/dups in the stats
2405  memmove(fps->frames_prev_hist + 1,
2406  fps->frames_prev_hist,
2407  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2408  fps->frames_prev_hist[0] = *nb_frames_prev;
2409 
2410  if (*nb_frames_prev == 0 && fps->last_dropped) {
2411  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2412  av_log(ofp, AV_LOG_VERBOSE,
2413  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2414  fps->frame_number, fps->last_frame->pts);
2415  }
2416  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2417  uint64_t nb_frames_dup;
2418  if (*nb_frames > dts_error_threshold * 30) {
2419  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2420  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2421  *nb_frames = 0;
2422  return;
2423  }
2424  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2425  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2426  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2427  if (nb_frames_dup > fps->dup_warning) {
2428  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2429  fps->dup_warning *= 10;
2430  }
2431  }
2432 
2433  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2434  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2435 }
2436 
/* NOTE(review): Doxygen text-extraction artifact -- the signature line
 * (Doxygen line 2437, presumably
 * static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt))
 * and interior lines 2439/2481 are missing; the numeric prefixes on each
 * line are Doxygen line numbers, not code. Restore from upstream before use.
 *
 * Terminates one filtergraph output: if this output never produced a frame,
 * first send a parameter-only dummy frame (format/size/layout copied from
 * ofp) so the downstream encoder can still be initialized, then send NULL
 * (EOF) to the scheduler. */
 2438 {
 2440  int ret;
 2441 
 2442  // we are finished and no frames were ever seen at this output,
 2443  // at least initialize the encoder with a dummy frame
 2444  if (!fgt->got_frame) {
 2445  AVFrame *frame = fgt->frame;
 2446  FrameData *fd;
 2447 
 2448  frame->time_base = ofp->tb_out;
 2449  frame->format = ofp->format;
 2450 
 2451  frame->width = ofp->width;
 2452  frame->height = ofp->height;
 2453  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
 2454 
 2455  frame->sample_rate = ofp->sample_rate;
 2456  if (ofp->ch_layout.nb_channels) {
 2457  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
 2458  if (ret < 0)
 2459  return ret;
 2460  }
 2461  av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);
 2462  ret = clone_side_data(&frame->side_data, &frame->nb_side_data,
 2463  ofp->side_data, ofp->nb_side_data, 0);
 2464  if (ret < 0)
 2465  return ret;
 2466 
 2467  fd = frame_data(frame);
 2468  if (!fd)
 2469  return AVERROR(ENOMEM);
 2470 
 2471  fd->frame_rate_filter = ofp->fps.framerate;
 2472 
 2473  // dummy frame must carry parameters only, no data buffer
 2474  av_assert0(!frame->buf[0]);
 2475 
 2475  av_log(ofp, AV_LOG_WARNING,
 2476  "No filtered frames for output stream, trying to "
 2477  "initialize anyway.\n");
 2478 
 2479  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame);
 2480  if (ret < 0) {
 2482  return ret;
 2483  }
 2484  }
 2485 
 2486  fgt->eof_out[ofp->ofilter.index] = 1;
 2487 
 2488  // NULL frame == EOF; a downstream EOF is not an error here
 2488  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL);
 2489  return (ret == AVERROR_EOF) ? 0 : ret;
 2490 }
2491 
/* NOTE(review): Doxygen extraction artifact -- the first signature line
 * (Doxygen line 2492, presumably
 * static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,...)
 * plus interior lines 2495/2528 are missing; numeric prefixes are Doxygen
 * line numbers, not code.
 *
 * Sends one filtered frame (or EOF when frame == NULL) to this output's
 * consumers. For video, video_sync_process() may expand a single input
 * frame into 0..N output frames (duplication/drop); duplicates of the
 * previous frame come first (i < nb_frames_prev). For audio, pts/duration
 * are rescaled into ofp->tb_out. A NULL frame closes the output via
 * close_output(). */
 2493  AVFrame *frame)
 2494 {
 2496  AVFrame *frame_prev = ofp->fps.last_frame;
 2497  enum AVMediaType type = ofp->ofilter.type;
 2498 
 2499  // by default emit exactly the frames we were given (1 if frame, 0 on EOF)
 2499  int64_t nb_frames = !!frame, nb_frames_prev = 0;
 2500 
 2501  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
 2502  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
 2503 
 2504  for (int64_t i = 0; i < nb_frames; i++) {
 2505  AVFrame *frame_out;
 2506  int ret;
 2507 
 2508  if (type == AVMEDIA_TYPE_VIDEO) {
 2509  // duplicate the previous frame first, then the current one
 2509  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
 2510  frame_prev : frame;
 2511  if (!frame_in)
 2512  break;
 2513 
 2514  frame_out = fgp->frame_enc;
 2515  ret = av_frame_ref(frame_out, frame_in);
 2516  if (ret < 0)
 2517  return ret;
 2518 
 2519  frame_out->pts = ofp->next_pts;
 2520 
 2521  if (ofp->fps.dropped_keyframe) {
 2522  frame_out->flags |= AV_FRAME_FLAG_KEY;
 2523  ofp->fps.dropped_keyframe = 0;
 2524  }
 2525  } else {
 2526  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
 2527  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
 2529 
 2530  frame->time_base = ofp->tb_out;
 2531  frame->duration = av_rescale_q(frame->nb_samples,
 2532  (AVRational){ 1, frame->sample_rate },
 2533  ofp->tb_out);
 2534 
 2535  ofp->next_pts = frame->pts + frame->duration;
 2536 
 2537  frame_out = frame;
 2538  }
 2539 
 2540  // send the frame to consumers
 2541  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out);
 2542  if (ret < 0) {
 2543  av_frame_unref(frame_out);
 2544 
 2545  if (!fgt->eof_out[ofp->ofilter.index]) {
 2546  fgt->eof_out[ofp->ofilter.index] = 1;
 2547  fgp->nb_outputs_done++;
 2548  }
 2549 
 2550  // consumer EOF is a normal way for an output to finish
 2550  return ret == AVERROR_EOF ? 0 : ret;
 2551  }
 2552 
 2553  if (type == AVMEDIA_TYPE_VIDEO) {
 2554  ofp->fps.frame_number++;
 2555  ofp->next_pts++;
 2556 
 2557  // only the first emission of this source frame keeps the key flag
 2557  if (i == nb_frames_prev && frame)
 2558  frame->flags &= ~AV_FRAME_FLAG_KEY;
 2559  }
 2560 
 2561  fgt->got_frame = 1;
 2562  }
 2563 
 2564  // remember the current frame for possible future duplication
 2564  if (frame && frame_prev) {
 2565  av_frame_unref(frame_prev);
 2566  av_frame_move_ref(frame_prev, frame);
 2567  }
 2568 
 2569  if (!frame)
 2570  return close_output(ofp, fgt);
 2571 
 2572  return 0;
 2573 }
2574 
/* NOTE(review): Doxygen extraction artifact -- the signature line
 * (Doxygen line 2575) and several interior lines are missing, including
 * 2583-2584 where `ret` is assigned before first use (presumably the
 * buffersink pull, av_buffersink_get_frame_flags() -- confirm against
 * upstream), and 2598/2602/2614/2621/2625/2634/2643.
 *
 * Pulls one frame from this output's buffersink and forwards it via
 * fg_output_frame(). Return values visible in the body: 1 when no frame
 * is available now (EAGAIN/EOF), 0 on success, <0 on error. On the first
 * real frame the output time base is chosen and locked. */
 2576  AVFrame *frame)
 2577 {
 2580  FrameData *fd;
 2581  int ret;
 2582 
 2585  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) {
 2586  ret = fg_output_frame(ofp, fgt, NULL);
 2587  return (ret < 0) ? ret : 1;
 2588  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 2589  return 1;
 2590  } else if (ret < 0) {
 2591  av_log(ofp, AV_LOG_WARNING,
 2592  "Error in retrieving a frame from the filtergraph: %s\n",
 2593  av_err2str(ret));
 2594  return ret;
 2595  }
 2596 
 2597  // output already at EOF: discard any late frames
 2597  if (fgt->eof_out[ofp->ofilter.index]) {
 2599  return 0;
 2600  }
 2601 
 2603 
 2604  if (debug_ts)
 2605  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
 2606  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
 2607  frame->time_base.num, frame->time_base.den);
 2608 
 2609  // Choose the output timebase the first time we get a frame.
 2610  if (!ofp->tb_out_locked) {
 2611  ret = choose_out_timebase(ofp, frame);
 2612  if (ret < 0) {
 2613  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
 2615  return ret;
 2616  }
 2617  }
 2618 
 2619  fd = frame_data(frame);
 2620  if (!fd) {
 2622  return AVERROR(ENOMEM);
 2623  }
 2624 
 2626 
 2627  // only use bits_per_raw_sample passed through from the decoder
 2628  // if the filtergraph did not touch the frame data
 2629  if (!fgp->is_meta)
 2630  fd->bits_per_raw_sample = 0;
 2631 
 2632  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
 2633  // synthesize a duration from the frame rate when the sink gave none
 2633  if (!frame->duration) {
 2635  if (fr.num > 0 && fr.den > 0)
 2636  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
 2637  }
 2638 
 2639  fd->frame_rate_filter = ofp->fps.framerate;
 2640  }
 2641 
 2642  ret = fg_output_frame(ofp, fgt, frame);
 2644  if (ret < 0)
 2645  return ret;
 2646 
 2647  return 0;
 2648 }
2649 
 2650 /* retrieve all frames available at filtergraph outputs
 2651  * and send them to consumers */
/* NOTE(review): Doxygen extraction artifact -- the signature line
 * (Doxygen line 2652, presumably
 * static int read_frames(FilterGraph *fg, FilterGraphThread *fgt,...))
 * and interior lines 2661/2680/2694 are missing. In particular `ret` in
 * the main loop is assigned on missing line 2694 -- presumably a request
 * for more frames on the configured graph; confirm against upstream.
 *
 * Returns 0 when the caller should feed more input (fgt->next_in set),
 * AVERROR_EOF when every output has finished, <0 on error. */
 2653  AVFrame *frame)
 2654 {
 2655  FilterGraphPriv *fgp = fgp_from_fg(fg);
 2656  int did_step = 0;
 2657 
 2658  // graph not configured, just select the input to request
 2659  if (!fgt->graph) {
 2660  for (int i = 0; i < fg->nb_inputs; i++) {
 2662  if (ifp->format < 0 && !fgt->eof_in[i]) {
 2663  fgt->next_in = i;
 2664  return 0;
 2665  }
 2666  }
 2667 
 2668  // This state - graph is not configured, but all inputs are either
 2669  // initialized or EOF - should be unreachable because sending EOF to a
 2670  // filter without even a fallback format should fail
 2671  av_assert0(0);
 2672  return AVERROR_BUG;
 2673  }
 2674 
 2675  while (fgp->nb_outputs_done < fg->nb_outputs) {
 2676  int ret;
 2677 
 2678  /* Reap all buffers present in the buffer sinks */
 2679  for (int i = 0; i < fg->nb_outputs; i++) {
 2681 
 2682  ret = 0;
 2683  // fg_output_step() returns 1 when this sink has no more frames now
 2683  while (!ret) {
 2684  ret = fg_output_step(ofp, fgt, frame);
 2685  if (ret < 0)
 2686  return ret;
 2687  }
 2688  }
 2689 
 2690  // return after one iteration, so that scheduler can rate-control us
 2691  if (did_step && fgp->have_sources)
 2692  return 0;
 2693 
 2695  if (ret == AVERROR(EAGAIN)) {
 2696  fgt->next_in = choose_input(fg, fgt);
 2697  return 0;
 2698  } else if (ret < 0) {
 2699  if (ret == AVERROR_EOF)
 2700  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
 2701  else
 2702  av_log(fg, AV_LOG_ERROR,
 2703  "Error requesting a frame from the filtergraph: %s\n",
 2704  av_err2str(ret));
 2705  return ret;
 2706  }
 2707  // nb_inputs is the sentinel meaning "no specific input requested"
 2707  fgt->next_in = fg->nb_inputs;
 2708 
 2709  did_step = 1;
 2710  }
 2711 
 2712  return AVERROR_EOF;
 2713 }
2714 
/* NOTE(review): Doxygen extraction artifact -- the signature line
 * (Doxygen line 2715, presumably
 * static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb))
 * is missing; numeric prefixes are Doxygen line numbers, not code.
 *
 * Advances the sub2video state to time `pts` (expressed in `tb`):
 * refreshes the overlaid subpicture when the current one has expired or
 * the machinery still needs initialization, otherwise re-pushes the
 * currently displayed picture so the video keeps flowing. */
 2716 {
 2717  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2718  int64_t pts2;
 2719 
 2720  /* subtitles seem to be usually muxed ahead of other streams;
 2721  if not, subtracting a larger time here is necessary */
 2722  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
 2723 
 2724  /* do not send the heartbeat frame if the subtitle is already ahead */
 2725  if (pts2 <= ifp->sub2video.last_pts)
 2726  return;
 2727 
 2728  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
 2729  /* if we have hit the end of the current displayed subpicture,
 2730  or if we need to initialize the system, update the
 2731  overlayed subpicture and its start/end times */
 2732  sub2video_update(ifp, pts2 + 1, NULL);
 2733  else
 2734  sub2video_push_ref(ifp, pts2);
 2735 }
2736 
/* Feeds a subtitle-stream frame into the sub2video machinery that renders
 * subtitles as video frames for the filtergraph.
 *
 * NOTE(review): Doxygen extraction artifact -- interior line 2752 is
 * missing (between av_frame_alloc() and av_fifo_write(); presumably the
 * frame contents are moved into `tmp` there -- confirm against upstream).
 *
 * buffer != 0: graph not ready yet, queue a copy of the frame for later.
 * frame with no buf[0]: heartbeat -- just advance the sub2video clock.
 * frame == NULL: EOF -- flush the current subpicture and close buffersrc.
 * Otherwise: buf[0]->data holds an AVSubtitle; render it immediately. */
 2737 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
 2738 {
 2739  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2740  int ret;
 2741 
 2742  if (buffer) {
 2743  AVFrame *tmp;
 2744 
 2745  if (!frame)
 2746  return 0;
 2747 
 2748  tmp = av_frame_alloc();
 2749  if (!tmp)
 2750  return AVERROR(ENOMEM);
 2751 
 2753 
 2754  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
 2755  if (ret < 0) {
 2756  av_frame_free(&tmp);
 2757  return ret;
 2758  }
 2759 
 2760  return 0;
 2761  }
 2762 
 2763  // heartbeat frame
 2764  if (frame && !frame->buf[0]) {
 2765  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
 2766  return 0;
 2767  }
 2768 
 2769  if (!frame) {
 2770  if (ifp->sub2video.end_pts < INT64_MAX)
 2771  sub2video_update(ifp, INT64_MAX, NULL);
 2772 
 2773  return av_buffersrc_add_frame(ifilter->filter, NULL);
 2774  }
 2775 
 2776  ifp->width = frame->width ? frame->width : ifp->width;
 2777  ifp->height = frame->height ? frame->height : ifp->height;
 2778 
 2779  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
 2780 
 2781  return 0;
 2782 }
2783 
/* Propagates EOF to one filtergraph input.
 *
 * NOTE(review): Doxygen extraction artifact -- interior lines are missing:
 * 2797 (rounding-flags tail of av_rescale_q_rnd), 2799 (the assignment of
 * `ret`, presumably the buffersrc close call sending the rescaled EOF pts
 * -- confirm against upstream), 2809, 2814 (the assignment of `ret`,
 * presumably av_channel_layout_copy) and 2819.
 *
 * If the buffersrc exists, forward the EOF timestamp into it. Otherwise
 * the graph was never configured: populate this input from its fallback
 * parameters and configure the graph if all inputs are now known; fail
 * with AVERROR_INVALIDDATA if the format is still unknown. */
 2784 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
 2785  int64_t pts, AVRational tb)
 2786 {
 2787  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2788  int ret;
 2789 
 2790  // EOF already delivered for this input
 2790  if (fgt->eof_in[ifilter->index])
 2791  return 0;
 2792 
 2793  fgt->eof_in[ifilter->index] = 1;
 2794 
 2795  if (ifilter->filter) {
 2796  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
 2798 
 2800  if (ret < 0)
 2801  return ret;
 2802  } else {
 2803  if (ifp->format < 0) {
 2804  // the filtergraph was never configured, use the fallback parameters
 2805  ifp->format = ifp->opts.fallback->format;
 2806  ifp->sample_rate = ifp->opts.fallback->sample_rate;
 2807  ifp->width = ifp->opts.fallback->width;
 2808  ifp->height = ifp->opts.fallback->height;
 2810  ifp->color_space = ifp->opts.fallback->colorspace;
 2811  ifp->color_range = ifp->opts.fallback->color_range;
 2812  ifp->time_base = ifp->opts.fallback->time_base;
 2813 
 2815  &ifp->opts.fallback->ch_layout);
 2816  if (ret < 0)
 2817  return ret;
 2818 
 2820  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
 2821  ifp->opts.fallback->side_data,
 2822  ifp->opts.fallback->nb_side_data, 0);
 2823  if (ret < 0)
 2824  return ret;
 2825 
 2826  if (ifilter_has_all_input_formats(ifilter->graph)) {
 2827  ret = configure_filtergraph(ifilter->graph, fgt);
 2828  if (ret < 0) {
 2829  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
 2830  return ret;
 2831  }
 2832  }
 2833  }
 2834 
 2835  if (ifp->format < 0) {
 2836  av_log(ifilter->graph, AV_LOG_ERROR,
 2837  "Cannot determine format of input %s after EOF\n",
 2838  ifp->opts.name);
 2839  return AVERROR_INVALIDDATA;
 2840  }
 2841  }
 2842 
 2843  return 0;
 2844 }
2845 
/* Bitmask of input-parameter changes detected by send_frame(); any set bit
 * marks the frame as needing a filtergraph reinit (see `need_reinit`).
 * NOTE(review): Doxygen extraction artifact -- the opening `enum {` line
 * (Doxygen line 2846) is missing from this capture. */
 2847  VIDEO_CHANGED = (1 << 0),
 2848  AUDIO_CHANGED = (1 << 1),
 2849  MATRIX_CHANGED = (1 << 2),
 2850  DOWNMIX_CHANGED = (1 << 3),
 2851  HWACCEL_CHANGED = (1 << 4)
 2852 };
2853 
/**
 * Substitute the literal "unknown" for a NULL string, so descriptor names
 * (pixel/sample format, color space/range) can be logged safely.
 *
 * @param str string to check, may be NULL
 * @return str itself when non-NULL, otherwise the constant "unknown"
 */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2858 
/* NOTE(review): Doxygen extraction artifact -- the first signature line
 * (Doxygen line 2859, presumably
 * static int send_frame(FilterGraph *fg, FilterGraphThread *fgt,...))
 * and several interior lines are missing: 2885/2892 (the side-data
 * lookups that open the `if (... sd = ...) {` blocks), 2902, 2914, 2927,
 * 2943 (AVBPrint init), 2981, 2986-2989 and 2991 (the buffersrc push that
 * assigns `ret` -- confirm against upstream).
 *
 * Feeds one decoded frame into its filtergraph input. Detects parameter
 * changes against the last-seen input state (format, dimensions, layout,
 * color, display matrix, downmix info, hwaccel context); depending on the
 * IFILTER_FLAG_DROPCHANGED / IFILTER_FLAG_REINIT options it either drops
 * the frame or reconfigures the whole filtergraph, then rescales the
 * frame into the input time base and pushes it to the buffersrc. */
 2860  InputFilter *ifilter, AVFrame *frame)
 2861 {
 2862  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
 2863  FrameData *fd;
 2864  AVFrameSideData *sd;
 2865  int need_reinit = 0, ret;
 2866 
 2867  /* determine if the parameters for this input changed */
 2868  switch (ifilter->type) {
 2869  case AVMEDIA_TYPE_AUDIO:
 2870  if (ifp->format != frame->format ||
 2871  ifp->sample_rate != frame->sample_rate ||
 2872  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
 2873  need_reinit |= AUDIO_CHANGED;
 2874  break;
 2875  case AVMEDIA_TYPE_VIDEO:
 2876  if (ifp->format != frame->format ||
 2877  ifp->width != frame->width ||
 2878  ifp->height != frame->height ||
 2879  ifp->color_space != frame->colorspace ||
 2880  ifp->color_range != frame->color_range)
 2881  need_reinit |= VIDEO_CHANGED;
 2882  break;
 2883  }
 2884 
 2886  if (!ifp->displaymatrix_present ||
 2887  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
 2888  need_reinit |= MATRIX_CHANGED;
 2889  } else if (ifp->displaymatrix_present)
 2890  need_reinit |= MATRIX_CHANGED;
 2891 
 2893  if (!ifp->downmixinfo_present ||
 2894  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
 2895  need_reinit |= DOWNMIX_CHANGED;
 2896  } else if (ifp->downmixinfo_present)
 2897  need_reinit |= DOWNMIX_CHANGED;
 2898 
 2899  // user asked to drop changed frames instead of reconfiguring
 2899  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
 2900  ifp->nb_dropped++;
 2901  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
 2903  return 0;
 2904  }
 2905 
 2906  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
 2907  need_reinit = 0;
 2908 
 2909  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
 2910  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
 2911  need_reinit |= HWACCEL_CHANGED;
 2912 
 2913  if (need_reinit) {
 2915  if (ret < 0)
 2916  return ret;
 2917  }
 2918 
 2919  /* (re)init the graph if possible, otherwise buffer the frame and return */
 2920  if (need_reinit || !fgt->graph) {
 2921  AVFrame *tmp = av_frame_alloc();
 2922 
 2923  if (!tmp)
 2924  return AVERROR(ENOMEM);
 2925 
 2926  if (!ifilter_has_all_input_formats(fg)) {
 2928 
 2929  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
 2930  if (ret < 0)
 2931  av_frame_free(&tmp);
 2932 
 2933  return ret;
 2934  }
 2935 
 2936  // drain the old graph before tearing it down
 2936  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
 2937  av_frame_free(&tmp);
 2938  if (ret < 0)
 2939  return ret;
 2940 
 2941  if (fgt->graph) {
 2942  AVBPrint reason;
 2944  if (need_reinit & AUDIO_CHANGED) {
 2945  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
 2946  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
 2947  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
 2948  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
 2949  }
 2950  if (need_reinit & VIDEO_CHANGED) {
 2951  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
 2952  const char *color_space_name = av_color_space_name(frame->colorspace);
 2953  const char *color_range_name = av_color_range_name(frame->color_range);
 2954  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
 2955  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
 2956  unknown_if_null(color_space_name), frame->width, frame->height);
 2957  }
 2958  if (need_reinit & MATRIX_CHANGED)
 2959  av_bprintf(&reason, "display matrix changed, ");
 2960  if (need_reinit & DOWNMIX_CHANGED)
 // NOTE(review): "medatata" is a typo for "metadata" in this log string;
 // it is runtime output, so it is deliberately left unchanged here.
 2961  av_bprintf(&reason, "downmix medatata changed, ");
 2962  if (need_reinit & HWACCEL_CHANGED)
 2963  av_bprintf(&reason, "hwaccel changed, ");
 2964  if (reason.len > 1)
 2965  reason.str[reason.len - 2] = '\0'; // remove last comma
 2966  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
 2967  }
 2968 
 2969  ret = configure_filtergraph(fg, fgt);
 2970  if (ret < 0) {
 2971  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
 2972  return ret;
 2973  }
 2974  }
 2975 
 2976  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
 2977  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
 2978  frame->time_base = ifp->time_base;
 2979 
 2980  if (ifp->displaymatrix_applied)
 2982 
 2983  fd = frame_data(frame);
 2984  if (!fd)
 2985  return AVERROR(ENOMEM);
 2987 
 2990  if (ret < 0) {
 2992  if (ret != AVERROR_EOF)
 2993  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2994  return ret;
 2995  }
 2996 
 2997  return 0;
 2998 }
2999 
/* Sets the OS-level name of the filtering thread for debugging:
 * "?f<output name>" for a simple graph (the missing Doxygen line 3006
 * presumably supplies the media-type character for %c -- confirm against
 * upstream), "fc<index>" for a complex graph. The missing line 3012 is
 * presumably the call that applies `name` to the current thread.
 * NOTE(review): Doxygen extraction artifact, lines 3006/3012 absent. */
 3000 static void fg_thread_set_name(const FilterGraph *fg)
 3001 {
 3002  char name[16];
 3003  if (filtergraph_is_simple(fg)) {
 3004  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
 3005  snprintf(name, sizeof(name), "%cf%s",
 3007  ofp->ofilter.output_name);
 3008  } else {
 3009  snprintf(name, sizeof(name), "fc%d", fg->index);
 3010  }
 3011 
 3013 }
3014 
/* Releases all per-thread filtering state: drains and frees the queued
 * output frames, the scratch frame, the EOF flag arrays and the filter
 * graph, then zeroes the struct so it can be reused.
 * NOTE(review): Doxygen extraction artifact -- the signature line
 * (Doxygen line 3015, presumably
 * static void fg_thread_uninit(FilterGraphThread *fgt)) and line 3021
 * (presumably freeing the fifo itself) are missing. */
 3016 {
 3017  if (fgt->frame_queue_out) {
 3018  AVFrame *frame;
 3019  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
 3020  av_frame_free(&frame);
 3022  }
 3023 
 3024  av_frame_free(&fgt->frame);
 3025  av_freep(&fgt->eof_in);
 3026  av_freep(&fgt->eof_out);
 3027 
 3028  avfilter_graph_free(&fgt->graph);
 3029 
 3030  memset(fgt, 0, sizeof(*fgt));
 3031 }
3032 
/* Initializes per-thread filtering state: a scratch frame, per-input and
 * per-output EOF flag arrays, and the output frame queue (the queue
 * allocation itself is on missing Doxygen line 3049 -- confirm against
 * upstream). On any failure everything is torn down via
 * fg_thread_uninit() and AVERROR(ENOMEM) is returned. */
 3033 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
 3034 {
 3035  memset(fgt, 0, sizeof(*fgt));
 3036 
 3037  fgt->frame = av_frame_alloc();
 3038  if (!fgt->frame)
 3039  goto fail;
 3040 
 3041  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
 3042  if (!fgt->eof_in)
 3043  goto fail;
 3044 
 3045  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
 3046  if (!fgt->eof_out)
 3047  goto fail;
 3048 
 3050  if (!fgt->frame_queue_out)
 3051  goto fail;
 3052 
 3053  return 0;
 3054 
 3055 fail:
 3056  fg_thread_uninit(fgt);
 3057  return AVERROR(ENOMEM);
 3058 }
3059 
/* Main loop of a filtergraph worker thread.
 *
 * NOTE(review): Doxygen extraction artifact -- interior lines are missing:
 * 3075 (the condition opening the configure block, presumably
 * `if (ifilter_has_all_input_formats(fg)) {`), 3129-3130, 3159 (the
 * OutputFilterPriv lookup in the flush loop) and 3171.
 *
 * Receives frames/EOF/control messages from the scheduler, dispatches
 * them to sub2video_frame()/send_frame()/send_eof(), then reaps output
 * frames with read_frames(). On termination, outputs that never saw EOF
 * are flushed with fg_output_frame(..., NULL). */
 3060 static int filter_thread(void *arg)
 3061 {
 3062  FilterGraphPriv *fgp = arg;
 3063  FilterGraph *fg = &fgp->fg;
 3064 
 3065  FilterGraphThread fgt;
 3066  int ret = 0, input_status = 0;
 3067 
 3068  ret = fg_thread_init(&fgt, fg);
 3069  if (ret < 0)
 3070  goto finish;
 3071 
 3072  fg_thread_set_name(fg);
 3073 
 3074  // if we have all input parameters the graph can now be configured
 3076  ret = configure_filtergraph(fg, &fgt);
 3077  if (ret < 0) {
 3078  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
 3079  av_err2str(ret));
 3080  goto finish;
 3081  }
 3082  }
 3083 
 3084  while (1) {
 3085  InputFilter *ifilter;
 3086  InputFilterPriv *ifp = NULL;
 3087  enum FrameOpaque o;
 3088  unsigned input_idx = fgt.next_in;
 3089 
 3090  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
 3091  &input_idx, fgt.frame);
 3092  if (input_status == AVERROR_EOF) {
 3093  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
 3094  break;
 3095  } else if (input_status == AVERROR(EAGAIN)) {
 3096  // should only happen when we didn't request any input
 3097  av_assert0(input_idx == fg->nb_inputs);
 3098  goto read_frames;
 3099  }
 3100  av_assert0(input_status >= 0);
 3101 
 3102  o = (intptr_t)fgt.frame->opaque;
 3103 
 // NOTE(review): the assignment below duplicates the one above; it is
 // idempotent and harmless, but one of the two should be removed.
 3104  o = (intptr_t)fgt.frame->opaque;
 3105 
 3106  // message on the control stream
 3107  if (input_idx == fg->nb_inputs) {
 3108  FilterCommand *fc;
 3109 
 3110  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
 3111 
 3112  fc = (FilterCommand*)fgt.frame->buf[0]->data;
 3113  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
 3114  fc->all_filters);
 3115  av_frame_unref(fgt.frame);
 3116  continue;
 3117  }
 3118 
 3119  // we received an input frame or EOF
 3120  ifilter = fg->inputs[input_idx];
 3121  ifp = ifp_from_ifilter(ifilter);
 3122 
 3123  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
 3124  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
 3125  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
 3126  !fgt.graph);
 3127  } else if (fgt.frame->buf[0]) {
 3128  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
 3129  } else {
 3131  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
 3132  }
 3133  av_frame_unref(fgt.frame);
 3134  if (ret == AVERROR_EOF) {
 3135  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
 3136  input_idx);
 3137  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
 3138  continue;
 3139  }
 3140  if (ret < 0)
 3141  goto finish;
 3142 
 3143 read_frames:
 3144  // retrieve all newly available frames
 3145  ret = read_frames(fg, &fgt, fgt.frame);
 3146  if (ret == AVERROR_EOF) {
 3147  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
 3148  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
 3149  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
 3150  break;
 3151  } else if (ret < 0) {
 3152  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
 3153  av_err2str(ret));
 3154  goto finish;
 3155  }
 3156  }
 3157 
 3158  // flush outputs that have not been closed yet
 3158  for (unsigned i = 0; i < fg->nb_outputs; i++) {
 3160 
 3161  if (fgt.eof_out[i] || !fgt.graph)
 3162  continue;
 3163 
 3164  ret = fg_output_frame(ofp, &fgt, NULL);
 3165  if (ret < 0)
 3166  goto finish;
 3167  }
 3168 
 3169 finish:
 3170 
 3172  print_filtergraph(fg, fgt.graph);
 3173 
 3174  // EOF is normal termination
 3175  if (ret == AVERROR_EOF)
 3176  ret = 0;
 3177 
 3178  fg_thread_uninit(&fgt);
 3179 
 3180  return ret;
 3181 }
3182 
3183 void fg_send_command(FilterGraph *fg, double time, const char *target,
3184  const char *command, const char *arg, int all_filters)
3185 {
3186  FilterGraphPriv *fgp = fgp_from_fg(fg);
3187  AVBufferRef *buf;
3188  FilterCommand *fc;
3189 
3190  fc = av_mallocz(sizeof(*fc));
3191  if (!fc)
3192  return;
3193 
3194  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3195  if (!buf) {
3196  av_freep(&fc);
3197  return;
3198  }
3199 
3200  fc->target = av_strdup(target);
3201  fc->command = av_strdup(command);
3202  fc->arg = av_strdup(arg);
3203  if (!fc->target || !fc->command || !fc->arg) {
3204  av_buffer_unref(&buf);
3205  return;
3206  }
3207 
3208  fc->time = time;
3209  fc->all_filters = all_filters;
3210 
3211  fgp->frame->buf[0] = buf;
3212  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3213 
3214  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3215 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:119
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2075
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1860
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:95
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:462
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:672
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:362
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:628
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:373
av_clip
#define av_clip
Definition: common.h:100
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2460
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:371
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:245
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:70
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:106
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2080
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2171
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1480
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:65
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:424
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:69
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:97
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:608
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:130
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:982
FrameData
Definition: ffmpeg.h:679
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2146
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:151
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:286
OutputFilter::apad
char * apad
Definition: ffmpeg.h:386
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:205
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:657
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:403
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3441
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:398
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:150
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:284
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:619
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2078
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1012
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:628
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2851
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1020
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:132
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1670
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:242
AVSubtitleRect
Definition: avcodec.h:2048
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2079
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1752
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:988
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:171
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:559
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:683
InputFile::index
int index
Definition: ffmpeg.h:495
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:421
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:750
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:523
AVFrame::width
int width
Definition: frame.h:493
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:50
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:57
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:793
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2492
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:185
FilterGraph::index
int index
Definition: ffmpeg.h:396
OutputFilter::index
int index
Definition: ffmpeg.h:375
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:129
data
const char data[16]
Definition: mxf.c:149
InputFilter::index
int index
Definition: ffmpeg.h:356
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:175
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:227
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1870
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:399
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2847
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:129
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:665
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:234
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:247
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:263
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1588
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:598
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2575
FilterGraphPriv
Definition: ffmpeg_filter.c:46
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:100
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1905
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:190
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1003
InputStream
Definition: ffmpeg.h:458
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:304
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:271
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:712
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:292
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3817
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:89
OutputFilterPriv
Definition: ffmpeg_filter.c:186
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3015
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:473
fail
#define fail()
Definition: checkasm.h:198
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:359
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:312
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:954
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:200
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:259
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:598
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:764
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1691
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1886
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:87
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:151
FrameData::tb
AVRational tb
Definition: ffmpeg.h:689
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:212
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:73
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:195
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:176
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:383
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:103
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:272
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:39
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:821
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2715
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:201
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:224
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:692
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:135
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2784
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:104
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1248
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:636
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:342
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:960
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:926
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:248
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:71
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:661
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:67
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:2850
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:110
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:400
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:622
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:118
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:720
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:384
InputFilter
Definition: ffmpeg.h:353
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:60
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:493
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:298
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:301
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:81
InputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:367
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2081
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3033
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:274
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:283
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:354
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:290
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:229
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1046
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:218
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:194
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:285
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3757
AVFormatContext
Format I/O context.
Definition: avformat.h:1264
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:642
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:374
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1297
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:223
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:126
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:753
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:196
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:858
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:173
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:466
tmp
static uint8_t tmp[20]
Definition: aes_ctr.c:47
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:162
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:141
Decoder
Definition: ffmpeg.h:444
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:869
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:300
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:927
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1196
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:584
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:903
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:210
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:636
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2437
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:92
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:70
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:696
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1430
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:950
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1227
AVFilterGraph
Definition: avfilter.h:582
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:145
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
InputFilterOptions
Definition: ffmpeg.h:270
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:125
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:733
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:401
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:217
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:474
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:105
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:47
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:62
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:187
FilterGraph
Definition: ffmpeg.h:394
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:939
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1498
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:78
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:288
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:361
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:747
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:273
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:587
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2140
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:66
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:264
f
f
Definition: af_crystalizer.c:122
OutputFilter::output_name
char * output_name
Definition: ffmpeg.h:379
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3060
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:139
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:91
FilterGraphThread
Definition: ffmpeg_filter.c:84
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:276
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:143
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:221
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:85
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:744
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:584
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:208
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:183
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:127
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2849
FilterCommand::time
double time
Definition: ffmpeg_filter.c:244
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:155
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:142
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1347
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:538
AVFrameSideData::data
uint8_t * data
Definition: frame.h:278
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:508
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:58
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:460
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2078
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:101
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1470
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:178
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:723
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:377
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:124
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1879
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1403
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2848
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2395
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2854
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:289
decoders
Decoder ** decoders
Definition: ffmpeg.c:114
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:189
nb_decoders
int nb_decoders
Definition: ffmpeg.c:115
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:388
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2652
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2031
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2859
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:954
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:375
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:198
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:245
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:183
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:167
filter_buffered_frames
int filter_buffered_frames
Definition: ffmpeg_opt.c:78
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:529
FPSConvContext
Definition: ffmpeg_filter.c:164
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:694
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:132
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3183
downmix_info.h
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:52
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:294
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:204
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:69
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1809
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:690
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:181
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:80
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:90
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:497
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:521
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:78
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:553
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:494
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:116
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:267
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:618
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:693
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:111
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:308
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:197
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:919
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:60
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2505
AVFilter
Filter definition.
Definition: avfilter.h:211
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2314
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:159
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1060
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:91
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:355
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:73
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:165
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:511
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1483
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:910
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:228
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2846
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:518
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:987
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:88
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:213
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:493
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:750
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:266
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:369
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:610
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:117
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:913
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:462
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:925
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:137
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:114
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:55
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1432
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:265
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:178
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:923
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2194
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:231
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:219
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:324
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:281
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:134
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:951
AVFilterContext
An instance of a filter.
Definition: avfilter.h:269
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:395
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:370
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:118
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:449
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2737
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:105
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1509
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:130
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:315
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:220
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:492
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2271
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:391
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:83
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:276
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:464
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2439
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:79
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:240
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:61
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1053
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:298
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:447
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:122
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:152
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:112
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:328
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:617
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:193
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1317
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:107
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:225
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:146
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1385
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:585
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:272
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:732
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:466
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:205
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:739
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:68
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:194
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:180
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3000
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:165
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1680
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2077
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:241
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:239
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:124
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3361
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:390
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:281
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
InputFilter::input_name
char * input_name
Definition: ffmpeg.h:363
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:104
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:182