FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/downmix_info.h"
34 #include "libavutil/mem.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/pixfmt.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/time.h"
40 #include "libavutil/timestamp.h"
41 
42 // FIXME private header, used for mid_pred()
43 #include "libavcodec/mathops.h"
44 
45 typedef struct FilterGraphPriv {
47 
48  // name used for logging
49  char log_name[32];
50 
51  int is_simple;
52  // true when the filtergraph contains only meta filters
53  // that do not modify the frame data
54  int is_meta;
55  // source filters are present in the graph
58 
59  unsigned nb_outputs_done;
60 
61  const char *graph_desc;
62 
63  char *nb_threads;
64 
65  // frame for temporarily holding output from the filtergraph
67  // frame for sending output to the encoder
69 
71  unsigned sch_idx;
73 
// Downcast a public FilterGraph to its private wrapper.
// NOTE(review): the signature line was elided in this dump; presumably
// `static FilterGraphPriv *fgp_from_fg(FilterGraph *fg)` — confirm upstream.
{
    return (FilterGraphPriv*)fg;
}
78 
79 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
80 {
81  return (const FilterGraphPriv*)fg;
82 }
83 
84 // data that is local to the filter thread and not visible outside of it
85 typedef struct FilterGraphThread {
87 
89 
90  // Temporary buffer for output frames, since on filtergraph reset
91  // we cannot send them to encoders immediately.
92  // The output index is stored in frame opaque.
94 
95  // index of the next input to request from the scheduler
96  unsigned next_in;
97  // set to 1 after at least one frame passed through this output
98  int got_frame;
99 
100  // EOF status of each input/output, as received by the thread
101  uint8_t *eof_in;
102  uint8_t *eof_out;
104 
105 typedef struct InputFilterPriv {
107 
109 
110  int index;
111 
113 
114  // used to hold submitted input
116 
117  /* for filters that are not yet bound to an input stream,
118  * this stores the input linklabel, if any */
119  uint8_t *linklabel;
120 
121  // filter data type
123  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
124  // same as type otherwise
126 
127  int eof;
128  int bound;
129 
130  // parameters configured for this input
131  int format;
132 
133  int width, height;
137 
140 
142 
145 
147 
149 
153 
156 
157  struct {
158  AVFrame *frame;
159 
162 
163  ///< marks if sub2video_update should force an initialization
164  unsigned int initialize;
165  } sub2video;
167 
// Downcast a public InputFilter to its private wrapper.
// NOTE(review): the signature line was elided in this dump; presumably
// `static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter)`.
{
    return (InputFilterPriv*)ifilter;
}
172 
173 typedef struct FPSConvContext {
175  /* number of frames emitted by the video-encoding sync code */
177  /* history of nb_frames_prev, i.e. the number of times the
178  * previous frame was duplicated by vsync code in recent
179  * do_video_out() calls */
181 
182  uint64_t dup_warning;
183 
186 
188 
194 
195 typedef struct OutputFilterPriv {
197 
198  int index;
199 
200  void *log_parent;
201  char log_name[32];
202 
203  char *name;
204 
206 
207  /* desired output stream properties */
208  int format;
209  int width, height;
214 
217 
218  // time base in which the output is sent to our downstream
219  // does not need to match the filtersink's timebase
221  // at least one frame with the above timebase was sent
222  // to our downstream, so it cannot change anymore
224 
226 
229 
230  // those are only set if no format is specified and the encoder gives us multiple options
231  // They point directly to the relevant lists of the encoder.
232  const int *formats;
234  const int *sample_rates;
237 
241  // offset for output timestamps, in AV_TIME_BASE_Q
245 
246  unsigned flags;
248 
// Downcast a public OutputFilter to its private wrapper.
// NOTE(review): the signature line was elided in this dump; presumably
// `static OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter)`.
{
    return (OutputFilterPriv*)ofilter;
}
253 
// Payload describing one filter command to be injected into the graph;
// freed by filter_command_free(). All three strings are owned by this struct.
// NOTE(review): at least one member was elided from this dump — compare
// against upstream before relying on the layout.
typedef struct FilterCommand {
    char *target;   // presumably the filter instance the command addresses — confirm
    char *command;  // command name
    char *arg;      // command argument string

    double time;    // presumably the time at which to apply the command — confirm
} FilterCommand;
262 
// AVBuffer free callback for FilterCommand payloads: releases the strings
// owned by the command, then the buffer data itself.
// NOTE(review): the declaration of `fc` (a cast of `data` to FilterCommand*)
// was elided in this dump — the body below does not compile as shown.
static void filter_command_free(void *opaque, uint8_t *data)
{

    av_freep(&fc->target);
    av_freep(&fc->command);
    av_freep(&fc->arg);

    av_free(data);
}
273 
// Prepare ifp->sub2video.frame as a blank canvas matching the input's
// configured geometry/format, then zero its first plane.
// Returns 0 on success, a negative AVERROR code on failure.
// NOTE(review): the signature line and the buffer-(re)allocation call whose
// result lands in `ret` were elided in this dump.
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    frame->width       = ifp->width;
    frame->height      = ifp->height;
    frame->format      = ifp->format;
    frame->colorspace  = ifp->color_space;
    frame->color_range = ifp->color_range;

    // ret is set by an allocation call elided from this dump
    if (ret < 0)
        return ret;

    // clear only the first plane — sufficient for the packed RGB32 sub2video format
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);

    return 0;
}
295 
296 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
297  AVSubtitleRect *r)
298 {
299  uint32_t *pal, *dst2;
300  uint8_t *src, *src2;
301  int x, y;
302 
303  if (r->type != SUBTITLE_BITMAP) {
304  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
305  return;
306  }
307  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
308  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
309  r->x, r->y, r->w, r->h, w, h
310  );
311  return;
312  }
313 
314  dst += r->y * dst_linesize + r->x * 4;
315  src = r->data[0];
316  pal = (uint32_t *)r->data[1];
317  for (y = 0; y < r->h; y++) {
318  dst2 = (uint32_t *)dst;
319  src2 = src;
320  for (x = 0; x < r->w; x++)
321  *(dst2++) = pal[*(src2++)];
322  dst += dst_linesize;
323  src += r->linesize[0];
324  }
325 }
326 
// Stamp the prepared sub2video canvas with `pts` and feed it to the graph's
// buffer source, remembering the pts as the last one pushed.
// NOTE(review): the signature line and the buffersrc submission call whose
// result lands in `ret` were elided in this dump.
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    av_assert1(frame->data[0]);
    ifp->sub2video.last_pts = frame->pts = pts;
    // ret is set by the (elided) av_buffersrc call; EOF is tolerated here
    if (ret != AVERROR_EOF && ret < 0)
        "Error while add the frame to buffer source(%s).\n",
        av_err2str(ret));
}
342 
// Render a new sub2video canvas: blank it, blit every rectangle of `sub`
// (if any), and push the result at the computed pts. With sub == NULL this
// refreshes the canvas — on first call starting from `heartbeat_pts`,
// otherwise from the previous subtitle's end time.
// NOTE(review): the first line of the av_log() call after the blank-frame
// failure check was elided in this dump.
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
                             const AVSubtitle *sub)
{
    AVFrame *frame = ifp->sub2video.frame;
    // NOTE(review): sub2video_copy_rect() takes uint8_t*; this `int8_t`
    // looks like a dump truncation of `uint8_t` — confirm against upstream.
    int8_t *dst;
    int     dst_linesize;
    int num_rects;
    int64_t pts, end_pts;

    if (sub) {
        // subtitle display times are in ms relative to sub->pts (AV_TIME_BASE units)
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts       = ifp->sub2video.initialize ?
                    heartbeat_pts : ifp->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ifp) < 0) {
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (int i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ifp, pts);
    ifp->sub2video.end_pts    = end_pts;
    ifp->sub2video.initialize = 0;
}
381 
/* Define a choose_<name>() helper that appends the allowed values for one
 * output property to an AVBPrint, in buffersink-option syntax
 * ("name=v1|v2|...:"). If a single value was configured (var != none) only
 * that value is printed; otherwise the encoder-supplied list, if any, is
 * printed '|'-separated with the trailing separator stripped. If neither is
 * set, nothing is appended. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint)         \
{                                                                             \
    if (ofp->var == none && !ofp->supported_list)                             \
        return;                                                               \
    av_bprintf(bprint, #name "=");                                            \
    if (ofp->var != none) {                                                   \
        av_bprintf(bprint, printf_format, get_name(ofp->var));                \
    } else {                                                                  \
        const type *p;                                                        \
                                                                              \
        for (p = ofp->supported_list; *p != none; p++) {                      \
            av_bprintf(bprint, printf_format "|", get_name(*p));              \
        }                                                                     \
        if (bprint->len > 0)                                                  \
            bprint->str[--bprint->len] = '\0';                                \
    }                                                                         \
    av_bprint_chars(bprint, ':', 1);                                          \
}
403 
406 
409 
411  "%d", )
412 
413 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
415 
416 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
418 
// Hand-written channel-layout variant of the DEF_CHOOSE_FORMAT helpers:
// appends "channel_layouts=..." to bprint from either the single configured
// layout or the encoder's '|'-separated list, then a ':' separator.
// Appends nothing when no layout information is available.
// NOTE(review): the loop body's layout-describe call was elided in this dump.
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
{
    if (av_channel_layout_check(&ofp->ch_layout)) {
        // a single valid layout was configured — print just that one
        av_bprintf(bprint, "channel_layouts=");
        av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
    } else if (ofp->ch_layouts) {
        const AVChannelLayout *p;

        av_bprintf(bprint, "channel_layouts=");
        for (p = ofp->ch_layouts; p->nb_channels; p++) {
            av_bprintf(bprint, "|");
        }
        // drop the trailing '|'
        if (bprint->len > 0)
            bprint->str[--bprint->len] = '\0';
    } else
        return;
    av_bprint_chars(bprint, ':', 1);
}
438 
439 static int read_binary(void *logctx, const char *path,
440  uint8_t **data, int *len)
441 {
442  AVIOContext *io = NULL;
443  int64_t fsize;
444  int ret;
445 
446  *data = NULL;
447  *len = 0;
448 
449  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
450  if (ret < 0) {
451  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
452  path, av_err2str(ret));
453  return ret;
454  }
455 
456  fsize = avio_size(io);
457  if (fsize < 0 || fsize > INT_MAX) {
458  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
459  ret = AVERROR(EIO);
460  goto fail;
461  }
462 
463  *data = av_malloc(fsize);
464  if (!*data) {
465  ret = AVERROR(ENOMEM);
466  goto fail;
467  }
468 
469  ret = avio_read(io, *data, fsize);
470  if (ret != fsize) {
471  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
472  ret = ret < 0 ? ret : AVERROR(EIO);
473  goto fail;
474  }
475 
476  *len = fsize;
477 
478  ret = 0;
479 fail:
480  avio_close(io);
481  if (ret < 0) {
482  av_freep(data);
483  *len = 0;
484  }
485  return ret;
486 }
487 
// Apply one key=value option to filter context `f`. If the plain option set
// fails with OPTION_NOT_FOUND and the key starts with '/', the remainder is
// treated as an option name whose VALUE is to be loaded from the file named
// by `val` (binary options via read_binary(), textual ones via file_read()).
// Returns 0 on success, a negative AVERROR code otherwise.
// NOTE(review): the initial av_opt_set() attempt, the av_opt_find() lookup
// that sets `o`, and the av_opt_set_bin()/av_opt_set() calls that consume
// the loaded data were all elided in this dump.
static int filter_opt_apply(void *logctx, AVFilterContext *f,
                            const char *key, const char *val)
{
    const AVOption *o = NULL;
    int ret;

    // ret holds the result of the (elided) direct av_opt_set() attempt
    if (ret >= 0)
        return 0;

    // '/'-prefixed key: look the real option up (elided av_opt_find())
    if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
    if (!o)
        goto err_apply;

    // key is a valid option name prefixed with '/'
    // interpret value as a path from which to load the actual option value
    key++;

    if (o->type == AV_OPT_TYPE_BINARY) {
        uint8_t *data;
        int      len;

        ret = read_binary(logctx, val, &data, &len);
        if (ret < 0)
            goto err_load;

        // (elided) av_opt_set_bin() consumes data/len here
        av_freep(&data);
    } else {
        char *data = file_read(val);
        if (!data) {
            ret = AVERROR(EIO);
            goto err_load;
        }

        // (elided) av_opt_set() consumes the text here
        av_freep(&data);
    }
    if (ret < 0)
        goto err_apply;

    return 0;

err_apply:
    av_log(logctx, AV_LOG_ERROR,
           "Error applying option '%s' to filter '%s': %s\n",
           key, f->filter->name, av_err2str(ret));
    return ret;
err_load:
    av_log(logctx, AV_LOG_ERROR,
           "Error loading value for option '%s' from file '%s'\n",
           key, val);
    return ret;
}
543 
544 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
545 {
546  for (size_t i = 0; i < seg->nb_chains; i++) {
547  AVFilterChain *ch = seg->chains[i];
548 
549  for (size_t j = 0; j < ch->nb_filters; j++) {
550  AVFilterParams *p = ch->filters[j];
551  const AVDictionaryEntry *e = NULL;
552 
553  av_assert0(p->filter);
554 
555  while ((e = av_dict_iterate(p->opts, e))) {
556  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
557  if (ret < 0)
558  return ret;
559  }
560 
561  av_dict_free(&p->opts);
562  }
563  }
564 
565  return 0;
566 }
567 
// Parse a textual filtergraph description into `graph`, attach the given
// hardware device to every HWDEVICE-capable filter, apply per-filter
// options, and hand the unlinked inputs/outputs back to the caller.
// Returns 0 on success, a negative AVERROR code otherwise.
// NOTE(review): elided in this dump: the `AVFilterInOut **inputs, **outputs`
// parameter line, the `AVFilterGraphSegment *seg` declaration, the
// segment create-filters call, the final segment-apply call, and the
// segment free in the fail path.
static int graph_parse(void *logctx,
                       AVFilterGraph *graph, const char *desc,
                       AVBufferRef *hw_device)
{
    int ret;

    *inputs  = NULL;
    *outputs = NULL;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    // ret re-set by the (elided) segment create-filters call
    if (ret < 0)
        goto fail;

    if (hw_device) {
        // give every HW-capable filter its own ref on the device context
        for (int i = 0; i < graph->nb_filters; i++) {
            AVFilterContext *f = graph->filters[i];

            if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
                continue;
            f->hw_device_ctx = av_buffer_ref(hw_device);
            if (!f->hw_device_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = graph_opts_apply(logctx, seg);
    if (ret < 0)
        goto fail;

fail:
    return ret;
}
611 
// Filters can be configured only if the formats of all inputs are known.
// Returns 1 when every input of `fg` has a negotiated format, 0 otherwise.
// NOTE(review): the function signature and the per-iteration `ifp`
// declaration were elided in this dump.
{
    for (int i = 0; i < fg->nb_inputs; i++) {
        // format < 0 means "not yet known" (see ifilter_alloc init to -1)
        if (ifp->format < 0)
            return 0;
    }
    return 1;
}
622 
623 static int filter_thread(void *arg);
624 
625 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
626 {
627  AVFilterContext *ctx = inout->filter_ctx;
628  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
629  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
630 
631  if (nb_pads > 1)
632  return av_strdup(ctx->filter->name);
633  return av_asprintf("%s:%s", ctx->filter->name,
634  avfilter_pad_get_name(pads, inout->pad_idx));
635 }
636 
637 static const char *ofilter_item_name(void *obj)
638 {
639  OutputFilterPriv *ofp = obj;
640  return ofp->log_name;
641 }
642 
// AVClass for OutputFilter: routes log messages through log_parent
// (graph-level context for simple graphs, see ofilter_bind_enc).
static const AVClass ofilter_class = {
    .class_name                = "OutputFilter",
    .version                   = LIBAVUTIL_VERSION_INT,
    .item_name                 = ofilter_item_name,
    .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
    .category                  = AV_CLASS_CATEGORY_FILTER,
};
650 
// Append a new output of the given media type to filtergraph `fg` and
// return its public side, or NULL on allocation failure.
// NOTE(review): elided in this dump: the signature line (presumably
// `static OutputFilter *ofilter_alloc(FilterGraph *fg, enum AVMediaType type)`),
// some field initializers, and the snprintf() argument line.
{
    OutputFilterPriv *ofp;
    OutputFilter *ofilter;

    ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
    if (!ofp)
        return NULL;

    ofilter = &ofp->ofilter;
    ofilter->class  = &ofilter_class;
    ofp->log_parent = fg;
    ofilter->graph  = fg;
    ofilter->type   = type;
    ofp->format     = -1;   // format not negotiated yet
    ofp->index      = fg->nb_outputs - 1;

    snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",

    return ofilter;
}
675 
// Bind a filtergraph input to a demuxed input stream: registers the filter
// with the stream, connects it in the scheduler, and sets up the sub2video
// canvas when the source is a subtitle stream.
// Returns 0 on success, a negative AVERROR code on failure.
// NOTE(review): elided in this dump: the SchedulerNode `src` declaration,
// part of the type-mismatch condition, and the av_log() argument line.
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
                            const ViewSpecifier *vs)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
    int ret;

    // an input may be bound at most once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ist->par->codec_type &&
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
        return AVERROR(EINVAL);
    }

    ifp->type_src = ist->st->codecpar->codec_type;

    ifp->opts.fallback = av_frame_alloc();
    if (!ifp->opts.fallback)
        return AVERROR(ENOMEM);

    ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
                         vs, &ifp->opts, &src);
    if (ret < 0)
        return ret;

    // route the stream's frames into this filtergraph input
    ret = sch_connect(fgp->sch,
                      src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
        ifp->sub2video.frame = av_frame_alloc();
        if (!ifp->sub2video.frame)
            return AVERROR(ENOMEM);

        ifp->width  = ifp->opts.sub2video_width;
        ifp->height = ifp->opts.sub2video_height;

        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
           palettes for all rectangles are identical or compatible */
        ifp->format = AV_PIX_FMT_RGB32;

        ifp->time_base = AV_TIME_BASE_Q;

        av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
               ifp->width, ifp->height);
    }

    return 0;
}
730 
// Bind a filtergraph input to a standalone decoder (the "dec:N" linklabel
// form): registers the filter with the decoder and connects it in the
// scheduler. Returns 0 on success, a negative AVERROR code on failure.
// NOTE(review): elided in this dump: the first signature line (presumably
// `static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec,`), the
// `fgp`/`src` declarations, and the av_log() argument line.
                       const ViewSpecifier *vs)
{
    int ret;

    // an input may be bound at most once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != dec->type) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
        return AVERROR(EINVAL);
    }

    ifp->type_src = ifp->type;

    ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
    if (ret < 0)
        return ret;

    ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    return 0;
}
759 
760 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
761  const AVChannelLayout *layout_requested)
762 {
763  int i, err;
764 
765  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
766  /* Pass the layout through for all orders but UNSPEC */
767  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
768  if (err < 0)
769  return err;
770  return 0;
771  }
772 
773  /* Requested layout is of order UNSPEC */
774  if (!layouts_allowed) {
775  /* Use the default native layout for the requested amount of channels when the
776  encoder doesn't have a list of supported layouts */
777  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
778  return 0;
779  }
780  /* Encoder has a list of supported layouts. Pick the first layout in it with the
781  same amount of channels as the requested layout */
782  for (i = 0; layouts_allowed[i].nb_channels; i++) {
783  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
784  break;
785  }
786  if (layouts_allowed[i].nb_channels) {
787  /* Use it if one is found */
788  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
789  if (err < 0)
790  return err;
791  return 0;
792  }
793  /* If no layout for the amount of channels requested was found, use the default
794  native layout for it. */
795  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
796 
797  return 0;
798 }
799 
// Bind a filtergraph output to an encoder: copies the desired output
// properties (geometry/format/color for video; format/rate/layout for
// audio) from `opts`, sets up logging and fps-conversion state, and
// connects the output to the encoder in the scheduler.
// Returns 0 on success, a negative AVERROR code on failure.
// NOTE(review): one line between the color-range block and the fps setup
// was elided in this dump.
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
                     const OutputFilterOptions *opts)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    FilterGraph *fg = ofilter->graph;
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    int ret;

    // an output may be bound at most once, and only to a matching encoder type
    av_assert0(!ofilter->bound);
    av_assert0(!opts->enc ||
               ofilter->type == opts->enc->type);

    ofilter->bound = 1;
    av_freep(&ofilter->linklabel);

    ofp->flags        = opts->flags;
    ofp->ts_offset    = opts->ts_offset;
    ofp->enc_timebase = opts->output_tb;

    ofp->trim_start_us    = opts->trim_start_us;
    ofp->trim_duration_us = opts->trim_duration_us;

    ofp->name = av_strdup(opts->name);
    if (!ofp->name)
        // NOTE(review): this is an allocation failure — AVERROR(ENOMEM)
        // would be the semantically correct code, not EINVAL
        return AVERROR(EINVAL);

    ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
    if (ret < 0)
        return ret;

    ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
    if (ret < 0)
        return ret;

    if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
        av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);

    if (fgp->is_simple) {
        // for simple filtergraph there is just one output,
        // so use only graph-level information for logging
        ofp->log_parent = NULL;
        av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
    } else
        av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);

    switch (ofilter->type) {
    case AVMEDIA_TYPE_VIDEO:
        // fixed values take precedence; otherwise keep the encoder's lists
        // for later format negotiation
        ofp->width  = opts->width;
        ofp->height = opts->height;
        if (opts->format != AV_PIX_FMT_NONE) {
            ofp->format = opts->format;
        } else
            ofp->formats = opts->formats;

        if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
            ofp->color_space = opts->color_space;
        else
            ofp->color_spaces = opts->color_spaces;

        if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
            ofp->color_range = opts->color_range;
        else
            ofp->color_ranges = opts->color_ranges;

        ofp->fps.last_frame = av_frame_alloc();
        if (!ofp->fps.last_frame)
            return AVERROR(ENOMEM);

        ofp->fps.vsync_method        = opts->vsync_method;
        ofp->fps.framerate           = opts->frame_rate;
        ofp->fps.framerate_max       = opts->max_frame_rate;
        ofp->fps.framerate_supported = opts->frame_rates;

        // reduce frame rate for mpeg4 to be within the spec limits
        if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
            ofp->fps.framerate_clip = 65535;

        ofp->fps.dup_warning = 1000;

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (opts->format != AV_SAMPLE_FMT_NONE) {
            ofp->format = opts->format;
        } else {
            ofp->formats = opts->formats;
        }
        if (opts->sample_rate) {
            ofp->sample_rate = opts->sample_rate;
        } else
            ofp->sample_rates = opts->sample_rates;
        if (opts->ch_layout.nb_channels) {
            int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
            if (ret < 0)
                return ret;
        } else {
            ofp->ch_layouts = opts->ch_layouts;
        }
        break;
    }

    // hook this filtergraph output up to the encoder in the scheduler
    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
                      SCH_ENC(sched_idx_enc));
    if (ret < 0)
        return ret;

    return 0;
}
909 
911  const OutputFilterOptions *opts)
912 {
913  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
914 
915  av_assert0(!ofilter->bound);
916  av_assert0(ofilter->type == ifp->type);
917 
918  ofilter->bound = 1;
919  av_freep(&ofilter->linklabel);
920 
921  ofp->name = av_strdup(opts->name);
922  if (!ofp->name)
923  return AVERROR(EINVAL);
924 
925  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofp->name);
926 
927  return 0;
928 }
929 
// Bind a filtergraph input to the output of another filtergraph (linklabel
// matching between complex graphs): names and binds the source output, then
// connects the two graphs in the scheduler.
// Returns 0 on success, a negative AVERROR code on failure.
// NOTE(review): elided in this dump: the `fgp` declaration, the local
// `OutputFilterOptions opts` declaration, and one av_log() argument line.
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
{
    OutputFilter *ofilter_src = fg_src->outputs[out_idx];
    char name[32];
    int ret;

    // an input may be bound at most once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ofilter_src->type) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
               av_get_media_type_string(ofilter_src->type),
        return AVERROR(EINVAL);
    }

    ifp->type_src = ifp->type;

    memset(&opts, 0, sizeof(opts));

    // synthetic name identifying the destination graph input
    snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->index);
    opts.name = name;

    ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
    if (ret < 0)
        return ret;

    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
                      SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    return 0;
}
966 
// Append a new input to filtergraph `fg` and return its public side, or
// NULL on allocation failure (partially-built state is reclaimed by
// fg_free()).
// NOTE(review): elided in this dump: the signature line (presumably
// `static InputFilter *ifilter_alloc(FilterGraph *fg)`), some field
// initializers, and the frame_queue allocation whose result is checked below.
{
    InputFilterPriv *ifp;
    InputFilter *ifilter;

    ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
    if (!ifp)
        return NULL;

    ifilter = &ifp->ifilter;
    ifilter->graph = fg;

    // scratch frame for submitting input to the graph
    ifp->frame = av_frame_alloc();
    if (!ifp->frame)
        return NULL;

    ifp->index  = fg->nb_inputs - 1;
    ifp->format = -1;   // format not known yet

    if (!ifp->frame_queue)
        return NULL;

    return ifilter;
}
994 
// Free a FilterGraph and everything it owns: all inputs (including queued
// frames and sub2video state), all outputs, the graph description and the
// private scratch frames. *pfg is set to NULL; a NULL *pfg is a no-op.
// NOTE(review): a few per-input/per-output free lines were elided in this
// dump — compare against upstream before treating this as leak-free.
void fg_free(FilterGraph **pfg)
{
    FilterGraph *fg = *pfg;
    FilterGraphPriv *fgp;

    if (!fg)
        return;
    fgp = fgp_from_fg(fg);

    for (int j = 0; j < fg->nb_inputs; j++) {
        InputFilter *ifilter = fg->inputs[j];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        // drain and free any frames still queued for this input
        if (ifp->frame_queue) {
            AVFrame *frame;
            while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
                av_frame_free(&frame);
            av_fifo_freep2(&ifp->frame_queue);
        }
        av_frame_free(&ifp->sub2video.frame);

        av_frame_free(&ifp->frame);
        av_frame_free(&ifp->opts.fallback);

        av_freep(&ifp->linklabel);
        av_freep(&ifp->opts.name);
        av_freep(&ifilter->name);
        av_freep(&fg->inputs[j]);
    }
    av_freep(&fg->inputs);
    for (int j = 0; j < fg->nb_outputs; j++) {
        OutputFilter *ofilter = fg->outputs[j];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

        av_frame_free(&ofp->fps.last_frame);
        av_dict_free(&ofp->sws_opts);
        av_dict_free(&ofp->swr_opts);

        av_freep(&ofilter->linklabel);
        av_freep(&ofilter->name);
        av_freep(&ofilter->apad);
        av_freep(&ofp->name);
        av_freep(&fg->outputs[j]);
    }
    av_freep(&fg->outputs);
    av_freep(&fgp->graph_desc);
    av_freep(&fgp->nb_threads);

    av_frame_free(&fgp->frame);
    av_frame_free(&fgp->frame_enc);

    av_freep(pfg);
}
1052 
1053 static const char *fg_item_name(void *obj)
1054 {
1055  const FilterGraphPriv *fgp = obj;
1056 
1057  return fgp->log_name;
1058 }
1059 
// AVClass for FilterGraph, used as the logging context of fg_* functions.
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
1066 
// Create a FilterGraph from a textual description. Takes ownership of
// graph_desc (freed on failure too). A throwaway AVFilterGraph is parsed
// only to enumerate the unlinked inputs/outputs, for which InputFilter/
// OutputFilter wrappers are allocated; the graph itself is freed before
// returning. The filtergraph is registered with the scheduler.
// With pfg == NULL the graph is appended to the global filtergraphs list
// (the registration call is among the lines elided from this dump).
// Returns 0 on success, a negative AVERROR code otherwise.
// NOTE(review): elided in this dump: the `inputs`/`outputs` declarations,
// the global-list append, one fgp field init, the trailing graph_parse()
// arguments, and the inout-list frees in the fail path.
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
{
    FilterGraphPriv *fgp;
    FilterGraph *fg;

    AVFilterGraph *graph;
    int ret = 0;

    fgp = av_mallocz(sizeof(*fgp));
    if (!fgp) {
        av_freep(&graph_desc);
        return AVERROR(ENOMEM);
    }
    fg = &fgp->fg;

    if (pfg) {
        // caller-owned graph, not in the global list
        *pfg = fg;
        fg->index = -1;
    } else {
        // ret set by the (elided) global-list append
        if (ret < 0) {
            av_freep(&graph_desc);
            av_freep(&fgp);
            return ret;
        }

        fg->index = nb_filtergraphs - 1;
    }

    fg->class       = &fg_class;
    fgp->graph_desc = graph_desc;
    fgp->sch        = sch;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);

    fgp->frame     = av_frame_alloc();
    fgp->frame_enc = av_frame_alloc();
    if (!fgp->frame || !fgp->frame_enc)
        return AVERROR(ENOMEM);

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        // NOTE(review): stray double semicolon (harmless empty statement)
        return AVERROR(ENOMEM);;
    graph->nb_threads = 1;

    ret = graph_parse(fg, graph, fgp->graph_desc, &inputs, &outputs,
    if (ret < 0)
        goto fail;

    // a graph has its own sources if any filter has no inputs (and is not
    // dynamic-input), or if it contains apad
    for (unsigned i = 0; i < graph->nb_filters; i++) {
        const AVFilter *f = graph->filters[i]->filter;
        if ((!avfilter_filter_pad_count(f, 0) &&
             !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
            !strcmp(f->name, "apad")) {
            fgp->have_sources = 1;
            break;
        }
    }

    // wrap every unlinked input pad in an InputFilter
    for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
        InputFilter *const ifilter = ifilter_alloc(fg);
        InputFilterPriv *ifp;

        if (!ifilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ifp = ifp_from_ifilter(ifilter);
        // steal the linklabel string from the inout entry
        ifp->linklabel = cur->name;
        cur->name      = NULL;

        ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
                                          cur->pad_idx);

        if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
            av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
                   "currently.\n");
            ret = AVERROR(ENOSYS);
            goto fail;
        }

        ifilter->name = describe_filter_link(fg, cur, 1);
        if (!ifilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    // wrap every unlinked output pad in an OutputFilter
    for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
        const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                            cur->pad_idx);
        OutputFilter *const ofilter = ofilter_alloc(fg, type);

        if (!ofilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ofilter->linklabel = cur->name;
        cur->name          = NULL;

        ofilter->name = describe_filter_link(fg, cur, 0);
        if (!ofilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!fg->nb_outputs) {
        av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
                              filter_thread, fgp);
    if (ret < 0)
        goto fail;
    fgp->sch_idx = ret;

fail:
    avfilter_graph_free(&graph);

    if (ret < 0)
        return ret;

    return 0;
}
1203 
// Create a "simple" filtergraph (the -vf/-af case): exactly one input bound
// to input stream `ist` and one output bound to the encoder at
// sched_idx_enc. Also applies the per-graph thread-count option.
// Returns 0 on success, a negative AVERROR code otherwise.
// NOTE(review): the first signature line was elided in this dump —
// presumably `int fg_create_simple(FilterGraph **pfg,`; also one av_log()
// argument line in the type-mismatch branch is missing.
                     InputStream *ist,
                     char *graph_desc,
                     Scheduler *sch, unsigned sched_idx_enc,
                     const OutputFilterOptions *opts)
{
    const enum AVMediaType type = ist->par->codec_type;
    FilterGraph *fg;
    FilterGraphPriv *fgp;
    int ret;

    ret = fg_create(pfg, graph_desc, sch);
    if (ret < 0)
        return ret;
    fg  = *pfg;
    fgp = fgp_from_fg(fg);

    fgp->is_simple = 1;

    // log name like "vf<encoder name>" / "af<encoder name>"
    snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
             av_get_media_type_string(type)[0], opts->name);

    if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
        av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
               "to have exactly 1 input and 1 output. "
               "However, it had %d input(s) and %d output(s). Please adjust, "
               "or use a complex filtergraph (-filter_complex) instead.\n",
               graph_desc, fg->nb_inputs, fg->nb_outputs);
        return AVERROR(EINVAL);
    }
    if (fg->outputs[0]->type != type) {
        av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
               "it to %s output stream\n",
        return AVERROR(EINVAL);
    }

    ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
    if (ret < 0)
        return ret;

    ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
    if (ret < 0)
        return ret;

    if (opts->nb_threads) {
        av_freep(&fgp->nb_threads);
        fgp->nb_threads = av_strdup(opts->nb_threads);
        if (!fgp->nb_threads)
            return AVERROR(ENOMEM);
    }

    return 0;
}
1259 
/* Bind one input of a complex filtergraph to its data source.
 * Depending on the input's link label this is:
 *   - "dec:N"           -> standalone decoder N,
 *   - a label matching another filtergraph's output -> that output,
 *   - "<file>:<spec>"   -> an explicitly specified demuxer stream,
 *   - no label          -> the first unused input stream of matching type.
 * Returns 0 on success, a negative AVERROR code on failure. */
1261 {
1262  FilterGraphPriv *fgp = fgp_from_fg(fg);
1263  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1264  InputStream *ist = NULL;
1265  enum AVMediaType type = ifp->type;
1267  const char *spec;
1268  char *p;
1269  int i, ret;
1270 
1271  if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
1272  // bind to a standalone decoder
1273  int dec_idx;
1274 
1275  dec_idx = strtol(ifp->linklabel + 4, &p, 0);
1276  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1277  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1278  dec_idx, fgp->graph_desc);
1279  return AVERROR(EINVAL);
1280  }
1281 
  // for video, the rest of the label (after ':') is a view specifier
1282  if (type == AVMEDIA_TYPE_VIDEO) {
1283  spec = *p == ':' ? p + 1 : p;
1284  ret = view_specifier_parse(&spec, &vs);
1285  if (ret < 0)
1286  return ret;
1287  }
1288 
1289  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1290  if (ret < 0)
1291  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1292  ifilter->name);
1293  return ret;
1294  } else if (ifp->linklabel) {
1296  AVFormatContext *s;
1297  AVStream *st = NULL;
1298  int file_idx;
1299 
1300  // try finding an unbound filtergraph output with this label
1301  for (int i = 0; i < nb_filtergraphs; i++) {
1302  FilterGraph *fg_src = filtergraphs[i];
1303 
1304  if (fg == fg_src)
1305  continue;
1306 
1307  for (int j = 0; j < fg_src->nb_outputs; j++) {
1308  OutputFilter *ofilter = fg_src->outputs[j];
1309 
1310  if (!ofilter->bound && ofilter->linklabel &&
1311  !strcmp(ofilter->linklabel, ifp->linklabel)) {
1312  av_log(fg, AV_LOG_VERBOSE,
1313  "Binding input with label '%s' to filtergraph output %d:%d\n",
1314  ifp->linklabel, i, j);
1315 
1316  ret = ifilter_bind_fg(ifp, fg_src, j);
1317  if (ret < 0)
1318  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1319  ifp->linklabel);
1320  return ret;
1321  }
1322  }
1323  }
1324 
1325  // bind to an explicitly specified demuxer stream
1326  file_idx = strtol(ifp->linklabel, &p, 0);
1327  if (file_idx < 0 || file_idx >= nb_input_files) {
1328  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1329  file_idx, fgp->graph_desc);
1330  return AVERROR(EINVAL);
1331  }
1332  s = input_files[file_idx]->ctx;
1333 
1334  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1335  if (ret < 0) {
1336  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1337  return ret;
1338  }
1339 
1340  if (type == AVMEDIA_TYPE_VIDEO) {
1341  spec = ss.remainder ? ss.remainder : "";
1342  ret = view_specifier_parse(&spec, &vs);
1343  if (ret < 0) {
1345  return ret;
1346  }
1347  }
1348 
  // pick the first stream of this file matching both the specifier and
  // the expected media type (subtitles are accepted for video inputs,
  // for the sub2video path)
1349  for (i = 0; i < s->nb_streams; i++) {
1350  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1351  if (stream_type != type &&
1352  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1353  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1354  continue;
1355  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1356  st = s->streams[i];
1357  break;
1358  }
1359  }
1361  if (!st) {
1362  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1363  "matches no streams.\n", p, fgp->graph_desc);
1364  return AVERROR(EINVAL);
1365  }
1366  ist = input_files[file_idx]->streams[st->index];
1367 
1368  av_log(fg, AV_LOG_VERBOSE,
1369  "Binding input with label '%s' to input stream %d:%d\n",
1370  ifp->linklabel, ist->file->index, ist->index);
1371  } else {
1372  ist = ist_find_unused(type);
1373  if (!ist) {
1374  av_log(fg, AV_LOG_FATAL,
1375  "Cannot find an unused %s input stream to feed the "
1376  "unlabeled input pad %s.\n",
1377  av_get_media_type_string(type), ifilter->name);
1378  return AVERROR(EINVAL);
1379  }
1380 
1381  av_log(fg, AV_LOG_VERBOSE,
1382  "Binding unlabeled input %d to input stream %d:%d\n",
1383  ifp->index, ist->file->index, ist->index);
1384  }
1385  av_assert0(ist);
1386 
 // common path for both labeled-stream and unlabeled bindings
1387  ret = ifilter_bind_ist(ifilter, ist, &vs);
1388  if (ret < 0) {
1389  av_log(fg, AV_LOG_ERROR,
1390  "Error binding an input stream to complex filtergraph input %s.\n",
1391  ifilter->name);
1392  return ret;
1393  }
1394 
1395  return 0;
1396 }
1397 
/* Bind every not-yet-bound input of this filtergraph to a data source
 * (an input stream or another filtergraph output), via
 * fg_complex_bind_input(). Returns 0 or a negative AVERROR code. */
1398 static int bind_inputs(FilterGraph *fg)
1399 {
1400  // bind filtergraph inputs to input streams or other filtergraphs
1401  for (int i = 0; i < fg->nb_inputs; i++) {
1403  int ret;
1404 
  // inputs already bound earlier (e.g. during option parsing) are skipped
1405  if (ifp->bound)
1406  continue;
1407 
1408  ret = fg_complex_bind_input(fg, &ifp->ifilter);
1409  if (ret < 0)
1410  return ret;
1411  }
1412 
1413  return 0;
1414 }
1415 
/* Finalise filtergraph bindings: bind the inputs of every filtergraph,
 * then verify that every filtergraph output got connected to a sink
 * (an unconnected output is a fatal configuration error). */
1417 {
1418  int ret;
1419 
1420  for (int i = 0; i < nb_filtergraphs; i++) {
1422  if (ret < 0)
1423  return ret;
1424  }
1425 
1426  // check that all outputs were bound
1427  for (int i = 0; i < nb_filtergraphs; i++) {
1428  FilterGraph *fg = filtergraphs[i];
1429 
1430  for (int j = 0; j < fg->nb_outputs; j++) {
1431  OutputFilter *output = fg->outputs[j];
1432  if (!output->bound) {
1433  av_log(fg, AV_LOG_FATAL,
1434  "Filter '%s' has output %d (%s) unconnected\n",
1435  output->name, j,
1436  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1437  return AVERROR(EINVAL);
1438  }
1439  }
1440  }
1441 
1442  return 0;
1443 }
1444 
/* Append a trim (video) or atrim (audio) filter after *last_filter to
 * enforce the requested start time / duration, then advance
 * *last_filter / *pad_idx to the new filter. A no-op when neither a
 * start time nor a duration limit is set.
 *
 * @param logctx      context for logging only
 * @param start_time  trim start in AV_TIME_BASE units, or AV_NOPTS_VALUE
 * @param duration    maximum duration, or INT64_MAX for unlimited
 * @return 0 on success, negative AVERROR code on failure */
1445 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1446  AVFilterContext **last_filter, int *pad_idx,
1447  const char *filter_name)
1448 {
1449  AVFilterGraph *graph = (*last_filter)->graph;
1451  const AVFilter *trim;
  // pick trim vs atrim based on the media type of the pad we extend
1452  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1453  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1454  int ret = 0;
1455 
1456  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1457  return 0;
1458 
1459  trim = avfilter_get_by_name(name);
1460  if (!trim) {
1461  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1462  "recording time.\n", name);
1463  return AVERROR_FILTER_NOT_FOUND;
1464  }
1465 
1466  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1467  if (!ctx)
1468  return AVERROR(ENOMEM);
1469 
 // "durationi"/"starti" are the integer (microsecond) forms of the options
1470  if (duration != INT64_MAX) {
1471  ret = av_opt_set_int(ctx, "durationi", duration,
1473  }
1474  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1475  ret = av_opt_set_int(ctx, "starti", start_time,
1477  }
1478  if (ret < 0) {
1479  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1480  return ret;
1481  }
1482 
1484  if (ret < 0)
1485  return ret;
1486 
1487  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1488  if (ret < 0)
1489  return ret;
1490 
1491  *last_filter = ctx;
1492  *pad_idx = 0;
1493  return 0;
1494 }
1495 
/* Create a filter by name with the given args, link it after *last_filter,
 * and advance *last_filter / *pad_idx to it. The caller is expected to
 * request only filters known to exist (hence AVERROR_BUG on lookup failure).
 * Returns 0 on success, negative AVERROR code on failure. */
1496 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1497  const char *filter_name, const char *args)
1498 {
1499  AVFilterGraph *graph = (*last_filter)->graph;
1500  const AVFilter *filter = avfilter_get_by_name(filter_name);
1502  int ret;
1503 
1504  if (!filter)
1505  return AVERROR_BUG;
1506 
1508  filter,
1509  filter_name, args, NULL, graph);
1510  if (ret < 0)
1511  return ret;
1512 
1513  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1514  if (ret < 0)
1515  return ret;
1516 
1517  *last_filter = ctx;
1518  *pad_idx = 0;
1519  return 0;
1520 }
1521 
1523  OutputFilter *ofilter, AVFilterInOut *out)
1524 {
 /* Configure one video output of the filtergraph: create a buffersink,
  * optionally insert an output scaler and a "format" filter constraining
  * pixel format / color space / color range, insert output trim, and
  * link the chain from the graph's output pad to the sink. */
1525  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1526  AVFilterContext *last_filter = out->filter_ctx;
1527  AVBPrint bprint;
1528  int pad_idx = out->pad_idx;
1529  int ret;
1530  char name[255];
1531 
1532  snprintf(name, sizeof(name), "out_%s", ofp->name);
1534  avfilter_get_by_name("buffersink"),
1535  name, NULL, NULL, graph);
1536 
1537  if (ret < 0)
1538  return ret;
1539 
 // autoscale to the requested output dimensions, forwarding sws options
1540  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE)) {
1541  char args[255];
1543  const AVDictionaryEntry *e = NULL;
1544 
1545  snprintf(args, sizeof(args), "%d:%d",
1546  ofp->width, ofp->height);
1547 
1548  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1549  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1550  }
1551 
1552  snprintf(name, sizeof(name), "scaler_out_%s", ofp->name);
1554  name, args, NULL, graph)) < 0)
1555  return ret;
1556  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1557  return ret;
1558 
1559  last_filter = filter;
1560  pad_idx = 0;
1561  }
1562 
 // collect the allowed pixel formats/color spaces/ranges into one
 // "format" filter argument string
1564  ofp->format != AV_PIX_FMT_NONE || !ofp->formats);
1566  choose_pix_fmts(ofp, &bprint);
1567  choose_color_spaces(ofp, &bprint);
1568  choose_color_ranges(ofp, &bprint);
1569  if (!av_bprint_is_complete(&bprint))
1570  return AVERROR(ENOMEM);
1571 
1572  if (bprint.len) {
1574 
1576  avfilter_get_by_name("format"),
1577  "format", bprint.str, NULL, graph);
1578  av_bprint_finalize(&bprint, NULL);
1579  if (ret < 0)
1580  return ret;
1581  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1582  return ret;
1583 
1584  last_filter = filter;
1585  pad_idx = 0;
1586  }
1587 
 // enforce output -ss/-t via a trim filter
1588  snprintf(name, sizeof(name), "trim_out_%s", ofp->name);
1589  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1590  &last_filter, &pad_idx, name);
1591  if (ret < 0)
1592  return ret;
1593 
1594 
1595  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1596  return ret;
1597 
1598  return 0;
1599 }
1600 
1602  OutputFilter *ofilter, AVFilterInOut *out)
1603 {
 /* Configure one audio output of the filtergraph: create an abuffersink,
  * optionally insert an "aformat" filter constraining sample format /
  * rate / channel layout, an apad filter, and output trim, then link the
  * chain to the sink. On any failure all paths go through `fail` to
  * release the AVBPrint buffer. */
1604  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1605  AVFilterContext *last_filter = out->filter_ctx;
1606  int pad_idx = out->pad_idx;
1607  AVBPrint args;
1608  char name[255];
1609  int ret;
1610 
1611  snprintf(name, sizeof(name), "out_%s", ofp->name);
1613  avfilter_get_by_name("abuffersink"),
1614  name, NULL, NULL, graph);
1615  if (ret < 0)
1616  return ret;
1617 
 /* Helper: create `filter_name` with `arg`, link it after last_filter and
  * advance last_filter/pad_idx; jumps to `fail` on error. */
1618 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1619  AVFilterContext *filt_ctx; \
1620  \
1621  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1622  "similarly to -af " filter_name "=%s.\n", arg); \
1623  \
1624  ret = avfilter_graph_create_filter(&filt_ctx, \
1625  avfilter_get_by_name(filter_name), \
1626  filter_name, arg, NULL, graph); \
1627  if (ret < 0) \
1628  goto fail; \
1629  \
1630  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1631  if (ret < 0) \
1632  goto fail; \
1633  \
1634  last_filter = filt_ctx; \
1635  pad_idx = 0; \
1636 } while (0)
1638 
 // collect allowed sample formats/rates/channel layouts for "aformat"
1639  choose_sample_fmts(ofp, &args);
1640  choose_sample_rates(ofp, &args);
1641  choose_channel_layouts(ofp, &args);
1642  if (!av_bprint_is_complete(&args)) {
1643  ret = AVERROR(ENOMEM);
1644  goto fail;
1645  }
1646  if (args.len) {
1648 
1649  snprintf(name, sizeof(name), "format_out_%s", ofp->name);
1651  avfilter_get_by_name("aformat"),
1652  name, args.str, NULL, graph);
1653  if (ret < 0)
1654  goto fail;
1655 
1656  ret = avfilter_link(last_filter, pad_idx, format, 0);
1657  if (ret < 0)
1658  goto fail;
1659 
1660  last_filter = format;
1661  pad_idx = 0;
1662  }
1663 
 // -apad: pad audio with silence; the graph then acts as a data source
1664  if (ofilter->apad) {
1665  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
1666  fgp->have_sources = 1;
1667  }
1668 
1669  snprintf(name, sizeof(name), "trim for output %s", ofp->name);
1670  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1671  &last_filter, &pad_idx, name);
1672  if (ret < 0)
1673  goto fail;
1674 
1675  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1676  goto fail;
1677 fail:
1678  av_bprint_finalize(&args, NULL);
1679 
1680  return ret;
1681 }
1682 
1684  OutputFilter *ofilter, AVFilterInOut *out)
1685 {
 /* Dispatch output configuration by media type; only video and audio
  * outputs are valid here. */
1686  switch (ofilter->type) {
1687  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1688  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1689  default: av_assert0(0); return 0;
1690  }
1691 }
1692 
/* Reset the sub2video state (subtitle-to-video conversion) so it will be
 * (re-)initialized by the first heartbeat received afterwards. */
1694 {
1695  ifp->sub2video.last_pts = INT64_MIN;
1696  ifp->sub2video.end_pts = INT64_MIN;
1697 
1698  /* sub2video structure has been (re-)initialized.
1699  Mark it as such so that the system will be
1700  initialized with the first received heartbeat. */
1701  ifp->sub2video.initialize = 1;
1702 }
1703 
1705  InputFilter *ifilter, AVFilterInOut *in)
1706 {
 /* Configure one video input of the filtergraph: create a "buffer" source
  * initialized from the stored input parameters, then optionally insert
  * crop, autorotation (transpose/hflip/vflip/rotate based on the display
  * matrix) and input trim filters before linking to the graph's input pad. */
1707  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1708 
1709  AVFilterContext *last_filter;
1710  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1711  const AVPixFmtDescriptor *desc;
1712  char name[255];
1713  int ret, pad_idx = 0;
1715  if (!par)
1716  return AVERROR(ENOMEM);
1717 
 // subtitle streams feeding a video input go through sub2video
1718  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1719  sub2video_prepare(ifp);
1720 
1721  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1722  ifp->opts.name);
1723 
1724  ifp->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1725  if (!ifp->filter) {
1726  ret = AVERROR(ENOMEM);
1727  goto fail;
1728  }
1729 
 // describe the incoming frames to the buffersrc
1730  par->format = ifp->format;
1731  par->time_base = ifp->time_base;
1732  par->frame_rate = ifp->opts.framerate;
1733  par->width = ifp->width;
1734  par->height = ifp->height;
1735  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1736  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1737  par->color_space = ifp->color_space;
1738  par->color_range = ifp->color_range;
1739  par->hw_frames_ctx = ifp->hw_frames_ctx;
1740  par->side_data = ifp->side_data;
1741  par->nb_side_data = ifp->nb_side_data;
1742 
1743  ret = av_buffersrc_parameters_set(ifp->filter, par);
1744  if (ret < 0)
1745  goto fail;
1746  av_freep(&par);
1747 
1748  ret = avfilter_init_dict(ifp->filter, NULL);
1749  if (ret < 0)
1750  goto fail;
1751 
1752  last_filter = ifp->filter;
1753 
1755  av_assert0(desc);
1756 
 // user-requested cropping relative to the coded frame size
1757  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1758  char crop_buf[64];
1759  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1760  ifp->opts.crop_left, ifp->opts.crop_right,
1761  ifp->opts.crop_top, ifp->opts.crop_bottom,
1762  ifp->opts.crop_left, ifp->opts.crop_top);
1763  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1764  if (ret < 0)
1765  return ret;
1766  }
1767 
1768  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
 // autorotation cannot be applied in software to hwaccel formats
1769  ifp->displaymatrix_applied = 0;
1770  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1771  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1772  int32_t *displaymatrix = ifp->displaymatrix;
1773  double theta;
1774 
1775  theta = get_rotation(displaymatrix);
1776 
 // ~90/180/270 degrees map to transpose/flip filters, anything else
 // (beyond 1 degree) to a generic rotate; near-0 may still need vflip
1777  if (fabs(theta - 90) < 1.0) {
1778  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1779  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1780  } else if (fabs(theta - 180) < 1.0) {
1781  if (displaymatrix[0] < 0) {
1782  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1783  if (ret < 0)
1784  return ret;
1785  }
1786  if (displaymatrix[4] < 0) {
1787  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1788  }
1789  } else if (fabs(theta - 270) < 1.0) {
1790  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1791  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1792  } else if (fabs(theta) > 1.0) {
1793  char rotate_buf[64];
1794  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1795  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1796  } else if (fabs(theta) < 1.0) {
1797  if (displaymatrix && displaymatrix[4] < 0) {
1798  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1799  }
1800  }
1801  if (ret < 0)
1802  return ret;
1803 
1804  ifp->displaymatrix_applied = 1;
1805  }
1806 
1807  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1808  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1809  &last_filter, &pad_idx, name);
1810  if (ret < 0)
1811  return ret;
1812 
1813  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1814  return ret;
1815  return 0;
1816 fail:
1817  av_freep(&par);
1818 
1819  return ret;
1820 }
1821 
1823  InputFilter *ifilter, AVFilterInOut *in)
1824 {
 /* Configure one audio input of the filtergraph: create an "abuffer"
  * source described by time base / sample rate / sample format / channel
  * layout, attach side data, insert input trim, and link to the graph's
  * input pad. */
1825  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1826  AVFilterContext *last_filter;
1827  AVBufferSrcParameters *par;
1828  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1829  AVBPrint args;
1830  char name[255];
1831  int ret, pad_idx = 0;
1832 
 // build the abuffer argument string from the stored input parameters
1834  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1835  ifp->time_base.num, ifp->time_base.den,
1836  ifp->sample_rate,
 // prefer a named channel layout; fall back to a bare channel count
1838  if (av_channel_layout_check(&ifp->ch_layout) &&
1840  av_bprintf(&args, ":channel_layout=");
1842  } else
1843  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1844  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1845 
1846  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1847  name, args.str, NULL,
1848  graph)) < 0)
1849  return ret;
 // side data (e.g. downmix info) is passed separately via parameters
1851  if (!par)
1852  return AVERROR(ENOMEM);
1853  par->side_data = ifp->side_data;
1854  par->nb_side_data = ifp->nb_side_data;
1855  ret = av_buffersrc_parameters_set(ifp->filter, par);
1856  av_free(par);
1857  if (ret < 0)
1858  return ret;
1859  last_filter = ifp->filter;
1860 
1861  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1862  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1863  &last_filter, &pad_idx, name);
1864  if (ret < 0)
1865  return ret;
1866 
1867  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1868  return ret;
1869 
1870  return 0;
1871 }
1872 
1874  InputFilter *ifilter, AVFilterInOut *in)
1875 {
 /* Dispatch input configuration by media type; only video and audio
  * inputs are valid here. */
1876  switch (ifp_from_ifilter(ifilter)->type) {
1877  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1878  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1879  default: av_assert0(0); return 0;
1880  }
1881 }
1882 
/* Tear down the configured graph: clear the per-input/output filter
 * context pointers (they are owned by the graph) and free the graph. */
1884 {
1885  for (int i = 0; i < fg->nb_outputs; i++)
1887  for (int i = 0; i < fg->nb_inputs; i++)
1888  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1889  avfilter_graph_free(&fgt->graph);
1890 }
1891 
/* Return 1 if the filter context is a buffer/abuffer source
 * (no inputs and one of the two buffersrc filter names). */
1893 {
1894  return f->nb_inputs == 0 &&
1895  (!strcmp(f->filter->name, "buffer") ||
1896  !strcmp(f->filter->name, "abuffer"));
1897 }
1898 
/* Return 1 if every filter in the graph is "meta" -- i.e. the graph never
 * modifies frame data (only metadata), counting sinks and buffer sources
 * as harmless. Used to decide whether filtering can be treated as
 * pass-through for the frame payload. */
1899 static int graph_is_meta(AVFilterGraph *graph)
1900 {
1901  for (unsigned i = 0; i < graph->nb_filters; i++) {
1902  const AVFilterContext *f = graph->filters[i];
1903 
1904  /* in addition to filters flagged as meta, also
1905  * disregard sinks and buffersources (but not other sources,
1906  * since they introduce data we are not aware of)
1907  */
1908  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1909  f->nb_outputs == 0 ||
1911  return 0;
1912  }
1913  return 1;
1914 }
1915 
1916 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1917 
/* (Re)configure the whole filtergraph: allocate a fresh AVFilterGraph,
 * apply thread/sws/swr options (simple graphs only), parse the graph
 * description, configure all inputs and outputs, then replay any queued
 * input frames and propagate EOF for inputs that already finished.
 * On failure the partially built graph is torn down via `fail`. */
1919 {
1920  FilterGraphPriv *fgp = fgp_from_fg(fg);
1921  AVBufferRef *hw_device;
1922  AVFilterInOut *inputs, *outputs, *cur;
1923  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
1924  int have_input_eof = 0;
1925  const char *graph_desc = fgp->graph_desc;
1926 
1927  cleanup_filtergraph(fg, fgt);
1928  fgt->graph = avfilter_graph_alloc();
1929  if (!fgt->graph)
1930  return AVERROR(ENOMEM);
1931 
 // per-output scaler/resampler options only apply to simple graphs,
 // which have exactly one output
1932  if (simple) {
1933  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
1934 
 // global -filter_threads takes precedence over the per-graph value
1935  if (filter_nbthreads) {
1936  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1937  if (ret < 0)
1938  goto fail;
1939  } else if (fgp->nb_threads) {
1940  ret = av_opt_set(fgt->graph, "threads", fgp->nb_threads, 0);
1941  if (ret < 0)
1942  return ret;
1943  }
1944 
1945  if (av_dict_count(ofp->sws_opts)) {
1947  &fgt->graph->scale_sws_opts,
1948  '=', ':');
1949  if (ret < 0)
1950  goto fail;
1951  }
1952 
1953  if (av_dict_count(ofp->swr_opts)) {
1954  char *args;
1955  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
1956  if (ret < 0)
1957  goto fail;
1958  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1959  av_free(args);
1960  }
1961  } else {
1963  }
1964 
1965  hw_device = hw_device_for_filter();
1966 
1967  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
1968  if (ret < 0)
1969  goto fail;
1970 
 // wire every parsed graph input/output to our InputFilter/OutputFilter
1971  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1972  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1975  goto fail;
1976  }
1978 
1979  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1980  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
1981  if (ret < 0) {
1983  goto fail;
1984  }
1985  }
1987 
1988  if (fgp->disable_conversions)
1990  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1991  goto fail;
1992 
1993  fgp->is_meta = graph_is_meta(fgt->graph);
1994 
1995  /* limit the lists of allowed formats to the ones selected, to
1996  * make sure they stay the same if the filtergraph is reconfigured later */
1997  for (int i = 0; i < fg->nb_outputs; i++) {
1998  const AVFrameSideData *const *sd;
1999  int nb_sd;
2000  OutputFilter *ofilter = fg->outputs[i];
2001  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2002  AVFilterContext *sink = ofp->filter;
2003 
2004  ofp->format = av_buffersink_get_format(sink);
2005 
2006  ofp->width = av_buffersink_get_w(sink);
2007  ofp->height = av_buffersink_get_h(sink);
2010 
2011  // If the timing parameters are not locked yet, get the tentative values
2012  // here but don't lock them. They will only be used if no output frames
2013  // are ever produced.
2014  if (!ofp->tb_out_locked) {
2016  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2017  fr.num > 0 && fr.den > 0)
2018  ofp->fps.framerate = fr;
2019  ofp->tb_out = av_buffersink_get_time_base(sink);
2020  }
2022 
2025  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2026  if (ret < 0)
2027  goto fail;
2029  sd = av_buffersink_get_side_data(sink, &nb_sd);
2030  if (nb_sd)
2031  for (int j = 0; j < nb_sd; j++) {
2033  sd[j], 0);
2034  if (ret < 0) {
2036  goto fail;
2037  }
2038  }
2039  }
2040 
 // replay frames that were queued while the graph was not configured
2041  for (int i = 0; i < fg->nb_inputs; i++) {
2043  AVFrame *tmp;
2044  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2045  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2046  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2047  } else {
2049  }
2050  av_frame_free(&tmp);
2051  if (ret < 0)
2052  goto fail;
2053  }
2054  }
2055 
2056  /* send the EOFs for the finished inputs */
2057  for (int i = 0; i < fg->nb_inputs; i++) {
2059  if (fgt->eof_in[i]) {
2061  if (ret < 0)
2062  goto fail;
2063  have_input_eof = 1;
2064  }
2065  }
2066 
2067  if (have_input_eof) {
2068  // make sure the EOF propagates to the end of the graph
2070  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2071  goto fail;
2072  }
2073 
2074  return 0;
2075 fail:
2076  cleanup_filtergraph(fg, fgt);
2077  return ret;
2078 }
2079 
/* Record the properties of an incoming frame (format, dimensions, colors,
 * sample parameters, global side data, display matrix, downmix info) into
 * the InputFilterPriv, so the buffersrc can later be configured to match.
 * Returns 0 or a negative AVERROR code. */
2081 {
2082  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2083  AVFrameSideData *sd;
2084  int ret;
2085 
2086  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2087  if (ret < 0)
2088  return ret;
2089 
 // audio: 1/sample_rate; CFR video: inverse framerate; otherwise the
 // frame's own time base
2090  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2091  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2092  frame->time_base;
2093 
2094  ifp->format = frame->format;
2095 
2096  ifp->width = frame->width;
2097  ifp->height = frame->height;
2098  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2099  ifp->color_space = frame->colorspace;
2100  ifp->color_range = frame->color_range;
2101 
2102  ifp->sample_rate = frame->sample_rate;
2103  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2104  if (ret < 0)
2105  return ret;
2106 
 // keep only side data marked global (stream-level, not per-frame)
2108  for (int i = 0; i < frame->nb_side_data; i++) {
2109  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2110 
2111  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL))
2112  continue;
2113 
2115  &ifp->nb_side_data,
2116  frame->side_data[i], 0);
2117  if (ret < 0)
2118  return ret;
2119  }
2120 
2122  if (sd)
2123  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2124  ifp->displaymatrix_present = !!sd;
2125 
2126  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2127  * to the filter chain even though it's not "global", as filters like aresample
2128  * require this information during init and not when remixing a frame */
2130  if (sd) {
2132  &ifp->nb_side_data, sd, 0);
2133  if (ret < 0)
2134  return ret;
2135  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2136  }
2137  ifp->downmixinfo_present = !!sd;
2138 
2139  return 0;
2140 }
2141 
/* Return nonzero if this is a "simple" filtergraph (one created implicitly
 * for a single stream, as opposed to a user-specified complex graph). */
2143 {
2144  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2145  return fgp->is_simple;
2146 }
2147 
2148 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2149  double time, const char *target,
2150  const char *command, const char *arg, int all_filters)
2151 {
2152  int ret;
2153 
2154  if (!graph)
2155  return;
2156 
2157  if (time < 0) {
2158  char response[4096];
2159  ret = avfilter_graph_send_command(graph, target, command, arg,
2160  response, sizeof(response),
2161  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2162  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2163  fg->index, ret, response);
2164  } else if (!all_filters) {
2165  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2166  } else {
2167  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2168  if (ret < 0)
2169  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2170  }
2171 }
2172 
2173 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2174 {
2175  int nb_requests, nb_requests_max = -1;
2176  int best_input = -1;
2177 
2178  for (int i = 0; i < fg->nb_inputs; i++) {
2179  InputFilter *ifilter = fg->inputs[i];
2180  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2181 
2182  if (fgt->eof_in[i])
2183  continue;
2184 
2185  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2186  if (nb_requests > nb_requests_max) {
2187  nb_requests_max = nb_requests;
2188  best_input = i;
2189  }
2190  }
2191 
2192  av_assert0(best_input >= 0);
2193 
2194  return best_input;
2195 }
2196 
/* Decide and lock the output time base (and, for video, the target
 * framerate) for this output, based on -enc_time_base, the frame's own
 * time base, the sink framerate, vsync mode and any encoder-imposed
 * framerate restrictions. Returns 0 or a negative AVERROR code. */
2198 {
2199  OutputFilter *ofilter = &ofp->ofilter;
2200  FPSConvContext *fps = &ofp->fps;
2201  AVRational tb = (AVRational){ 0, 0 };
2202  AVRational fr;
2203  const FrameData *fd;
2204 
2205  fd = frame_data_c(frame);
2206 
2207  // apply -enc_time_base
2208  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2209  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2210  av_log(ofp, AV_LOG_ERROR,
2211  "Demuxing timebase not available - cannot use it for encoding\n");
2212  return AVERROR(EINVAL);
2213  }
2214 
2215  switch (ofp->enc_timebase.num) {
2216  case 0: break;
2217  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2218  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2219  default: tb = ofp->enc_timebase; break;
2220  }
2221 
 // audio: default to 1/sample_rate when nothing was requested
2222  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2223  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2224  goto finish;
2225  }
2226 
 // video: start from the configured framerate, falling back to what the
 // buffersink reports
2227  fr = fps->framerate;
2228  if (!fr.num) {
2230  if (fr_sink.num > 0 && fr_sink.den > 0)
2231  fr = fr_sink;
2232  }
2233 
2234  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2235  if (!fr.num && !fps->framerate_max.num) {
2236  fr = (AVRational){25, 1};
2237  av_log(ofp, AV_LOG_WARNING,
2238  "No information "
2239  "about the input framerate is available. Falling "
2240  "back to a default value of 25fps. Use the -r option "
2241  "if you want a different framerate.\n");
2242  }
2243 
2244  if (fps->framerate_max.num &&
2245  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2246  !fr.den))
2247  fr = fps->framerate_max;
2248  }
2249 
 // clamp to the encoder's supported/clipped framerate set, if any
2250  if (fr.num > 0) {
2251  if (fps->framerate_supported) {
2252  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2253  fr = fps->framerate_supported[idx];
2254  }
2255  if (fps->framerate_clip) {
2256  av_reduce(&fr.num, &fr.den,
2257  fr.num, fr.den, fps->framerate_clip);
2258  }
2259  }
2260 
 // fall back: inverse framerate, then the frame's own time base
2261  if (!(tb.num > 0 && tb.den > 0))
2262  tb = av_inv_q(fr);
2263  if (!(tb.num > 0 && tb.den > 0))
2264  tb = frame->time_base;
2265 
2266  fps->framerate = fr;
2267 finish:
2268  ofp->tb_out = tb;
2269  ofp->tb_out_locked = 1;
2270 
2271  return 0;
2272 }
2273 
/* Rescale frame->pts (and time_base) from the filter time base to the
 * encoder time base tb_dst, offset by start_time. Returns the same pts as
 * a double computed at higher precision (extra fractional bits), slightly
 * nudged off exact .5 midpoints to stabilize later rounding; returns
 * AV_NOPTS_VALUE (as a double) when the frame has no pts. */
2274 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2275  AVRational tb_dst, int64_t start_time)
2276 {
2277  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2278 
2279  AVRational tb = tb_dst;
2280  AVRational filter_tb = frame->time_base;
 // widen tb.den to gain fractional precision, capped at 16 extra bits
2281  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2282 
2283  if (frame->pts == AV_NOPTS_VALUE)
2284  goto early_exit;
2285 
2286  tb.den <<= extra_bits;
2287  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2289  float_pts /= 1 << extra_bits;
2290  // when float_pts is not exactly an integer,
2291  // avoid exact midpoints to reduce the chance of rounding differences, this
2292  // can be removed in case the fps code is changed to work with integers
2293  if (float_pts != llrint(float_pts))
2294  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2295 
2296  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2298  frame->time_base = tb_dst;
2299 
2300 early_exit:
2301 
2302  if (debug_ts) {
2303  av_log(logctx, AV_LOG_INFO,
2304  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2305  frame ? av_ts2str(frame->pts) : "NULL",
2306  av_ts2timestr(frame->pts, &tb_dst),
2307  float_pts, tb_dst.num, tb_dst.den);
2308  }
2309 
2310  return float_pts;
2311 }
2312 
2313 /* Convert frame timestamps to the encoder timebase and decide how many times
2314  * should this (and possibly previous) frame be repeated in order to conform to
2315  * desired target framerate (if any).
2316  */
2318  int64_t *nb_frames, int64_t *nb_frames_prev)
2319 {
2320  OutputFilter *ofilter = &ofp->ofilter;
2321  FPSConvContext *fps = &ofp->fps;
2322  double delta0, delta, sync_ipts, duration;
2323 
 // NULL frame = EOF flush: predict the duplication count from the
 // median of the last three frames' history
2324  if (!frame) {
2325  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2326  fps->frames_prev_hist[1],
2327  fps->frames_prev_hist[2]);
2328 
2329  if (!*nb_frames && fps->last_dropped) {
2330  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2331  fps->last_dropped++;
2332  }
2333 
2334  goto finish;
2335  }
2336 
 // frame duration expressed in output time-base units
2337  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2338 
2339  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2340  ofp->tb_out, ofp->ts_offset);
2341  /* delta0 is the "drift" between the input frame and
2342  * where it would fall in the output. */
2343  delta0 = sync_ipts - ofp->next_pts;
2344  delta = delta0 + duration;
2345 
2346  // tracks the number of times the PREVIOUS frame should be duplicated,
2347  // mostly for variable framerate (VFR)
2348  *nb_frames_prev = 0;
2349  /* by default, we output a single frame */
2350  *nb_frames = 1;
2351 
 // frame arrived slightly early but still overlaps its slot: clip it
 // onto the expected position instead of duplicating/dropping
2352  if (delta0 < 0 &&
2353  delta > 0 &&
2356  && fps->vsync_method != VSYNC_DROP
2357 #endif
2358  ) {
2359  if (delta0 < -0.6) {
2360  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2361  } else
2362  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2363  sync_ipts = ofp->next_pts;
2364  duration += delta0;
2365  delta0 = 0;
2366  }
2367 
2368  switch (fps->vsync_method) {
2369  case VSYNC_VSCFR:
 // VSCFR: do not duplicate to fill a leading gap; start CFR from the
 // first frame's position, then fall through to the CFR logic
2370  if (fps->frame_number == 0 && delta0 >= 0.5) {
2371  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2372  delta = duration;
2373  delta0 = 0;
2374  ofp->next_pts = llrint(sync_ipts);
2375  }
2376  case VSYNC_CFR:
2377  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2378  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2379  *nb_frames = 0;
2380  } else if (delta < -1.1)
2381  *nb_frames = 0;
2382  else if (delta > 1.1) {
2383  *nb_frames = llrintf(delta);
2384  if (delta0 > 1.1)
2385  *nb_frames_prev = llrintf(delta0 - 0.6);
2386  }
2387  frame->duration = 1;
2388  break;
2389  case VSYNC_VFR:
 // VFR: drop frames that land in the past, otherwise emit at their
 // own timestamp
2390  if (delta <= -0.6)
2391  *nb_frames = 0;
2392  else if (delta > 0.6)
2393  ofp->next_pts = llrint(sync_ipts);
2394  frame->duration = llrint(duration);
2395  break;
2396 #if FFMPEG_OPT_VSYNC_DROP
2397  case VSYNC_DROP:
2398 #endif
2399  case VSYNC_PASSTHROUGH:
2400  ofp->next_pts = llrint(sync_ipts);
2401  frame->duration = llrint(duration);
2402  break;
2403  default:
2404  av_assert0(0);
2405  }
2406 
2407 finish:
 // shift the duplication history and record this decision
2408  memmove(fps->frames_prev_hist + 1,
2409  fps->frames_prev_hist,
2410  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2411  fps->frames_prev_hist[0] = *nb_frames_prev;
2412 
2413  if (*nb_frames_prev == 0 && fps->last_dropped) {
2414  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2415  av_log(ofp, AV_LOG_VERBOSE,
2416  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2417  fps->frame_number, fps->last_frame->pts);
2418  }
 // account for duplicated frames and warn when duplication explodes
2419  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2420  uint64_t nb_frames_dup;
2421  if (*nb_frames > dts_error_threshold * 30) {
2422  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2423  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2424  *nb_frames = 0;
2425  return;
2426  }
2427  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2428  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2429  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2430  if (nb_frames_dup > fps->dup_warning) {
2431  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2432  fps->dup_warning *= 10;
2433  }
2434  }
2435 
2436  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2437  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2438 }
2439 
2441 {
2443  int ret;
2444 
2445  // we are finished and no frames were ever seen at this output,
2446  // at least initialize the encoder with a dummy frame
2447  if (!fgt->got_frame) {
2448  AVFrame *frame = fgt->frame;
2449  FrameData *fd;
2450 
2451  frame->time_base = ofp->tb_out;
2452  frame->format = ofp->format;
2453 
2454  frame->width = ofp->width;
2455  frame->height = ofp->height;
2456  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2457 
2458  frame->sample_rate = ofp->sample_rate;
2459  if (ofp->ch_layout.nb_channels) {
2460  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2461  if (ret < 0)
2462  return ret;
2463  }
2464  av_frame_side_data_free(&frame->side_data, &frame->nb_side_data);
2465  ret = clone_side_data(&frame->side_data, &frame->nb_side_data,
2466  ofp->side_data, ofp->nb_side_data, 0);
2467  if (ret < 0)
2468  return ret;
2469 
2470  fd = frame_data(frame);
2471  if (!fd)
2472  return AVERROR(ENOMEM);
2473 
2474  fd->frame_rate_filter = ofp->fps.framerate;
2475 
2476  av_assert0(!frame->buf[0]);
2477 
2478  av_log(ofp, AV_LOG_WARNING,
2479  "No filtered frames for output stream, trying to "
2480  "initialize anyway.\n");
2481 
2482  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2483  if (ret < 0) {
2485  return ret;
2486  }
2487  }
2488 
2489  fgt->eof_out[ofp->index] = 1;
2490 
2491  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2492  return (ret == AVERROR_EOF) ? 0 : ret;
2493 }
2494 
2496  AVFrame *frame)
2497 {
2499  AVFrame *frame_prev = ofp->fps.last_frame;
2500  enum AVMediaType type = ofp->ofilter.type;
2501 
2502  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2503 
2504  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2505  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2506 
2507  for (int64_t i = 0; i < nb_frames; i++) {
2508  AVFrame *frame_out;
2509  int ret;
2510 
2511  if (type == AVMEDIA_TYPE_VIDEO) {
2512  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2513  frame_prev : frame;
2514  if (!frame_in)
2515  break;
2516 
2517  frame_out = fgp->frame_enc;
2518  ret = av_frame_ref(frame_out, frame_in);
2519  if (ret < 0)
2520  return ret;
2521 
2522  frame_out->pts = ofp->next_pts;
2523 
2524  if (ofp->fps.dropped_keyframe) {
2525  frame_out->flags |= AV_FRAME_FLAG_KEY;
2526  ofp->fps.dropped_keyframe = 0;
2527  }
2528  } else {
2529  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2530  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2532 
2533  frame->time_base = ofp->tb_out;
2534  frame->duration = av_rescale_q(frame->nb_samples,
2535  (AVRational){ 1, frame->sample_rate },
2536  ofp->tb_out);
2537 
2538  ofp->next_pts = frame->pts + frame->duration;
2539 
2540  frame_out = frame;
2541  }
2542 
2543  // send the frame to consumers
2544  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2545  if (ret < 0) {
2546  av_frame_unref(frame_out);
2547 
2548  if (!fgt->eof_out[ofp->index]) {
2549  fgt->eof_out[ofp->index] = 1;
2550  fgp->nb_outputs_done++;
2551  }
2552 
2553  return ret == AVERROR_EOF ? 0 : ret;
2554  }
2555 
2556  if (type == AVMEDIA_TYPE_VIDEO) {
2557  ofp->fps.frame_number++;
2558  ofp->next_pts++;
2559 
2560  if (i == nb_frames_prev && frame)
2561  frame->flags &= ~AV_FRAME_FLAG_KEY;
2562  }
2563 
2564  fgt->got_frame = 1;
2565  }
2566 
2567  if (frame && frame_prev) {
2568  av_frame_unref(frame_prev);
2569  av_frame_move_ref(frame_prev, frame);
2570  }
2571 
2572  if (!frame)
2573  return close_output(ofp, fgt);
2574 
2575  return 0;
2576 }
2577 
2579  AVFrame *frame)
2580 {
2582  AVFilterContext *filter = ofp->filter;
2583  FrameData *fd;
2584  int ret;
2585 
2588  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
2589  ret = fg_output_frame(ofp, fgt, NULL);
2590  return (ret < 0) ? ret : 1;
2591  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2592  return 1;
2593  } else if (ret < 0) {
2594  av_log(ofp, AV_LOG_WARNING,
2595  "Error in retrieving a frame from the filtergraph: %s\n",
2596  av_err2str(ret));
2597  return ret;
2598  }
2599 
2600  if (fgt->eof_out[ofp->index]) {
2602  return 0;
2603  }
2604 
2606 
2607  if (debug_ts)
2608  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2609  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2610  frame->time_base.num, frame->time_base.den);
2611 
2612  // Choose the output timebase the first time we get a frame.
2613  if (!ofp->tb_out_locked) {
2614  ret = choose_out_timebase(ofp, frame);
2615  if (ret < 0) {
2616  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2618  return ret;
2619  }
2620  }
2621 
2622  fd = frame_data(frame);
2623  if (!fd) {
2625  return AVERROR(ENOMEM);
2626  }
2627 
2629 
2630  // only use bits_per_raw_sample passed through from the decoder
2631  // if the filtergraph did not touch the frame data
2632  if (!fgp->is_meta)
2633  fd->bits_per_raw_sample = 0;
2634 
2635  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
2636  if (!frame->duration) {
2638  if (fr.num > 0 && fr.den > 0)
2639  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2640  }
2641 
2642  fd->frame_rate_filter = ofp->fps.framerate;
2643  }
2644 
2645  ret = fg_output_frame(ofp, fgt, frame);
2647  if (ret < 0)
2648  return ret;
2649 
2650  return 0;
2651 }
2652 
2653 /* retrieve all frames available at filtergraph outputs
2654  * and send them to consumers */
2656  AVFrame *frame)
2657 {
2658  FilterGraphPriv *fgp = fgp_from_fg(fg);
2659  int did_step = 0;
2660 
2661  // graph not configured, just select the input to request
2662  if (!fgt->graph) {
2663  for (int i = 0; i < fg->nb_inputs; i++) {
2665  if (ifp->format < 0 && !fgt->eof_in[i]) {
2666  fgt->next_in = i;
2667  return 0;
2668  }
2669  }
2670 
2671  // This state - graph is not configured, but all inputs are either
2672  // initialized or EOF - should be unreachable because sending EOF to a
2673  // filter without even a fallback format should fail
2674  av_assert0(0);
2675  return AVERROR_BUG;
2676  }
2677 
2678  while (fgp->nb_outputs_done < fg->nb_outputs) {
2679  int ret;
2680 
2682  if (ret == AVERROR(EAGAIN)) {
2683  fgt->next_in = choose_input(fg, fgt);
2684  break;
2685  } else if (ret < 0) {
2686  if (ret == AVERROR_EOF)
2687  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2688  else
2689  av_log(fg, AV_LOG_ERROR,
2690  "Error requesting a frame from the filtergraph: %s\n",
2691  av_err2str(ret));
2692  return ret;
2693  }
2694  fgt->next_in = fg->nb_inputs;
2695 
2696  // return after one iteration, so that scheduler can rate-control us
2697  if (did_step && fgp->have_sources)
2698  return 0;
2699 
2700  /* Reap all buffers present in the buffer sinks */
2701  for (int i = 0; i < fg->nb_outputs; i++) {
2703 
2704  ret = 0;
2705  while (!ret) {
2706  ret = fg_output_step(ofp, fgt, frame);
2707  if (ret < 0)
2708  return ret;
2709  }
2710  }
2711  did_step = 1;
2712  }
2713 
2714  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2715 }
2716 
2718 {
2719  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2720  int64_t pts2;
2721 
2722  /* subtitles seem to be usually muxed ahead of other streams;
2723  if not, subtracting a larger time here is necessary */
2724  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2725 
2726  /* do not send the heartbeat frame if the subtitle is already ahead */
2727  if (pts2 <= ifp->sub2video.last_pts)
2728  return;
2729 
2730  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2731  /* if we have hit the end of the current displayed subpicture,
2732  or if we need to initialize the system, update the
2733  overlayed subpicture and its start/end times */
2734  sub2video_update(ifp, pts2 + 1, NULL);
2735  else
2736  sub2video_push_ref(ifp, pts2);
2737 }
2738 
2739 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2740 {
2741  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2742  int ret;
2743 
2744  if (buffer) {
2745  AVFrame *tmp;
2746 
2747  if (!frame)
2748  return 0;
2749 
2750  tmp = av_frame_alloc();
2751  if (!tmp)
2752  return AVERROR(ENOMEM);
2753 
2755 
2756  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2757  if (ret < 0) {
2758  av_frame_free(&tmp);
2759  return ret;
2760  }
2761 
2762  return 0;
2763  }
2764 
2765  // heartbeat frame
2766  if (frame && !frame->buf[0]) {
2767  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2768  return 0;
2769  }
2770 
2771  if (!frame) {
2772  if (ifp->sub2video.end_pts < INT64_MAX)
2773  sub2video_update(ifp, INT64_MAX, NULL);
2774 
2775  return av_buffersrc_add_frame(ifp->filter, NULL);
2776  }
2777 
2778  ifp->width = frame->width ? frame->width : ifp->width;
2779  ifp->height = frame->height ? frame->height : ifp->height;
2780 
2781  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2782 
2783  return 0;
2784 }
2785 
2786 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2787  int64_t pts, AVRational tb)
2788 {
2789  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2790  int ret;
2791 
2792  if (fgt->eof_in[ifp->index])
2793  return 0;
2794 
2795  fgt->eof_in[ifp->index] = 1;
2796 
2797  if (ifp->filter) {
2798  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2800 
2802  if (ret < 0)
2803  return ret;
2804  } else {
2805  if (ifp->format < 0) {
2806  // the filtergraph was never configured, use the fallback parameters
2807  ifp->format = ifp->opts.fallback->format;
2808  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2809  ifp->width = ifp->opts.fallback->width;
2810  ifp->height = ifp->opts.fallback->height;
2812  ifp->color_space = ifp->opts.fallback->colorspace;
2813  ifp->color_range = ifp->opts.fallback->color_range;
2814  ifp->time_base = ifp->opts.fallback->time_base;
2815 
2817  &ifp->opts.fallback->ch_layout);
2818  if (ret < 0)
2819  return ret;
2820 
2822  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
2823  ifp->opts.fallback->side_data,
2824  ifp->opts.fallback->nb_side_data, 0);
2825  if (ret < 0)
2826  return ret;
2827 
2828  if (ifilter_has_all_input_formats(ifilter->graph)) {
2829  ret = configure_filtergraph(ifilter->graph, fgt);
2830  if (ret < 0) {
2831  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
2832  return ret;
2833  }
2834  }
2835  }
2836 
2837  if (ifp->format < 0) {
2838  av_log(ifilter->graph, AV_LOG_ERROR,
2839  "Cannot determine format of input %s after EOF\n",
2840  ifp->opts.name);
2841  return AVERROR_INVALIDDATA;
2842  }
2843  }
2844 
2845  return 0;
2846 }
2847 
2849  VIDEO_CHANGED = (1 << 0),
2850  AUDIO_CHANGED = (1 << 1),
2851  MATRIX_CHANGED = (1 << 2),
2852  DOWNMIX_CHANGED = (1 << 3),
2853  HWACCEL_CHANGED = (1 << 4)
2854 };
2855 
/**
 * Map a NULL string to the literal "unknown", otherwise pass it through.
 * Used when logging format/colorspace names that may be unavailable.
 */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
2860 
2862  InputFilter *ifilter, AVFrame *frame)
2863 {
2864  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2865  FrameData *fd;
2866  AVFrameSideData *sd;
2867  int need_reinit = 0, ret;
2868 
2869  /* determine if the parameters for this input changed */
2870  switch (ifp->type) {
2871  case AVMEDIA_TYPE_AUDIO:
2872  if (ifp->format != frame->format ||
2873  ifp->sample_rate != frame->sample_rate ||
2874  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
2875  need_reinit |= AUDIO_CHANGED;
2876  break;
2877  case AVMEDIA_TYPE_VIDEO:
2878  if (ifp->format != frame->format ||
2879  ifp->width != frame->width ||
2880  ifp->height != frame->height ||
2881  ifp->color_space != frame->colorspace ||
2882  ifp->color_range != frame->color_range)
2883  need_reinit |= VIDEO_CHANGED;
2884  break;
2885  }
2886 
2888  if (!ifp->displaymatrix_present ||
2889  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2890  need_reinit |= MATRIX_CHANGED;
2891  } else if (ifp->displaymatrix_present)
2892  need_reinit |= MATRIX_CHANGED;
2893 
2895  if (!ifp->downmixinfo_present ||
2896  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
2897  need_reinit |= DOWNMIX_CHANGED;
2898  } else if (ifp->downmixinfo_present)
2899  need_reinit |= DOWNMIX_CHANGED;
2900 
2901  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2902  need_reinit = 0;
2903 
2904  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2905  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2906  need_reinit |= HWACCEL_CHANGED;
2907 
2908  if (need_reinit) {
2910  if (ret < 0)
2911  return ret;
2912  }
2913 
2914  /* (re)init the graph if possible, otherwise buffer the frame and return */
2915  if (need_reinit || !fgt->graph) {
2916  AVFrame *tmp = av_frame_alloc();
2917 
2918  if (!tmp)
2919  return AVERROR(ENOMEM);
2920 
2921  if (!ifilter_has_all_input_formats(fg)) {
2923 
2924  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2925  if (ret < 0)
2926  av_frame_free(&tmp);
2927 
2928  return ret;
2929  }
2930 
2931  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2932  av_frame_free(&tmp);
2933  if (ret < 0)
2934  return ret;
2935 
2936  if (fgt->graph) {
2937  AVBPrint reason;
2939  if (need_reinit & AUDIO_CHANGED) {
2940  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2941  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2942  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
2943  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2944  }
2945  if (need_reinit & VIDEO_CHANGED) {
2946  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2947  const char *color_space_name = av_color_space_name(frame->colorspace);
2948  const char *color_range_name = av_color_range_name(frame->color_range);
2949  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2950  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2951  unknown_if_null(color_space_name), frame->width, frame->height);
2952  }
2953  if (need_reinit & MATRIX_CHANGED)
2954  av_bprintf(&reason, "display matrix changed, ");
2955  if (need_reinit & DOWNMIX_CHANGED)
2956  av_bprintf(&reason, "downmix medatata changed, ");
2957  if (need_reinit & HWACCEL_CHANGED)
2958  av_bprintf(&reason, "hwaccel changed, ");
2959  if (reason.len > 1)
2960  reason.str[reason.len - 2] = '\0'; // remove last comma
2961  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2962  }
2963 
2964  ret = configure_filtergraph(fg, fgt);
2965  if (ret < 0) {
2966  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2967  return ret;
2968  }
2969  }
2970 
2971  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
2972  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
2973  frame->time_base = ifp->time_base;
2974 
2975  if (ifp->displaymatrix_applied)
2977 
2978  fd = frame_data(frame);
2979  if (!fd)
2980  return AVERROR(ENOMEM);
2982 
2985  if (ret < 0) {
2987  if (ret != AVERROR_EOF)
2988  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2989  return ret;
2990  }
2991 
2992  return 0;
2993 }
2994 
2995 static void fg_thread_set_name(const FilterGraph *fg)
2996 {
2997  char name[16];
2998  if (filtergraph_is_simple(fg)) {
2999  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
3000  snprintf(name, sizeof(name), "%cf%s",
3002  ofp->name);
3003  } else {
3004  snprintf(name, sizeof(name), "fc%d", fg->index);
3005  }
3006 
3008 }
3009 
3011 {
3012  if (fgt->frame_queue_out) {
3013  AVFrame *frame;
3014  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
3015  av_frame_free(&frame);
3017  }
3018 
3019  av_frame_free(&fgt->frame);
3020  av_freep(&fgt->eof_in);
3021  av_freep(&fgt->eof_out);
3022 
3023  avfilter_graph_free(&fgt->graph);
3024 
3025  memset(fgt, 0, sizeof(*fgt));
3026 }
3027 
3028 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3029 {
3030  memset(fgt, 0, sizeof(*fgt));
3031 
3032  fgt->frame = av_frame_alloc();
3033  if (!fgt->frame)
3034  goto fail;
3035 
3036  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
3037  if (!fgt->eof_in)
3038  goto fail;
3039 
3040  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
3041  if (!fgt->eof_out)
3042  goto fail;
3043 
3045  if (!fgt->frame_queue_out)
3046  goto fail;
3047 
3048  return 0;
3049 
3050 fail:
3051  fg_thread_uninit(fgt);
3052  return AVERROR(ENOMEM);
3053 }
3054 
3055 static int filter_thread(void *arg)
3056 {
3057  FilterGraphPriv *fgp = arg;
3058  FilterGraph *fg = &fgp->fg;
3059 
3060  FilterGraphThread fgt;
3061  int ret = 0, input_status = 0;
3062 
3063  ret = fg_thread_init(&fgt, fg);
3064  if (ret < 0)
3065  goto finish;
3066 
3067  fg_thread_set_name(fg);
3068 
3069  // if we have all input parameters the graph can now be configured
3071  ret = configure_filtergraph(fg, &fgt);
3072  if (ret < 0) {
3073  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
3074  av_err2str(ret));
3075  goto finish;
3076  }
3077  }
3078 
3079  while (1) {
3080  InputFilter *ifilter;
3081  InputFilterPriv *ifp;
3082  enum FrameOpaque o;
3083  unsigned input_idx = fgt.next_in;
3084 
3085  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
3086  &input_idx, fgt.frame);
3087  if (input_status == AVERROR_EOF) {
3088  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3089  break;
3090  } else if (input_status == AVERROR(EAGAIN)) {
3091  // should only happen when we didn't request any input
3092  av_assert0(input_idx == fg->nb_inputs);
3093  goto read_frames;
3094  }
3095  av_assert0(input_status >= 0);
3096 
3097  o = (intptr_t)fgt.frame->opaque;
3098 
3099  o = (intptr_t)fgt.frame->opaque;
3100 
3101  // message on the control stream
3102  if (input_idx == fg->nb_inputs) {
3103  FilterCommand *fc;
3104 
3105  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3106 
3107  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3108  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3109  fc->all_filters);
3110  av_frame_unref(fgt.frame);
3111  continue;
3112  }
3113 
3114  // we received an input frame or EOF
3115  ifilter = fg->inputs[input_idx];
3116  ifp = ifp_from_ifilter(ifilter);
3117 
3118  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3119  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3120  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3121  !fgt.graph);
3122  } else if (fgt.frame->buf[0]) {
3123  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3124  } else {
3126  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3127  }
3128  av_frame_unref(fgt.frame);
3129  if (ret == AVERROR_EOF) {
3130  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3131  input_idx);
3132  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
3133  continue;
3134  }
3135  if (ret < 0)
3136  goto finish;
3137 
3138 read_frames:
3139  // retrieve all newly avalable frames
3140  ret = read_frames(fg, &fgt, fgt.frame);
3141  if (ret == AVERROR_EOF) {
3142  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3143  break;
3144  } else if (ret < 0) {
3145  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3146  av_err2str(ret));
3147  goto finish;
3148  }
3149  }
3150 
3151  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3153 
3154  if (fgt.eof_out[i] || !fgt.graph)
3155  continue;
3156 
3157  ret = fg_output_frame(ofp, &fgt, NULL);
3158  if (ret < 0)
3159  goto finish;
3160  }
3161 
3162 finish:
3163  // EOF is normal termination
3164  if (ret == AVERROR_EOF)
3165  ret = 0;
3166 
3167  fg_thread_uninit(&fgt);
3168 
3169  return ret;
3170 }
3171 
/**
 * Send a filter command (e.g. from the "sendcmd"-style control interface) to
 * the filtergraph's worker thread, via the scheduler's control stream.
 *
 * Best-effort: the function returns void and silently drops the command on
 * any allocation failure.
 *
 * @param fg          filtergraph to receive the command
 * @param time        time at which the command should be applied
 * @param target      filter(s) the command is addressed to
 * @param command     command name
 * @param arg         command argument string
 * @param all_filters if set, deliver the command to all filters
 */
void fg_send_command(FilterGraph *fg, double time, const char *target,
                     const char *command, const char *arg, int all_filters)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    AVBufferRef *buf;
    FilterCommand *fc;

    fc = av_mallocz(sizeof(*fc));
    if (!fc)
        return;

    // wrap fc in a refcounted buffer; from here on buf owns fc and
    // filter_command_free() releases it (NOTE(review): this relies on
    // filter_command_free(), not visible here, also freeing the strdup'ed
    // target/command/arg members — the error path below depends on that)
    buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
    if (!buf) {
        // buffer creation failed, so fc is still owned by us
        av_freep(&fc);
        return;
    }

    fc->target = av_strdup(target);
    fc->command = av_strdup(command);
    fc->arg = av_strdup(arg);
    if (!fc->target || !fc->command || !fc->arg) {
        // unref frees fc and any strings that were successfully duplicated
        av_buffer_unref(&buf);
        return;
    }

    fc->time = time;
    fc->all_filters = all_filters;

    // tag the frame as a control-stream message; ownership of buf moves
    // into the frame, which the filter thread unpacks and unrefs
    fgp->frame->buf[0] = buf;
    fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;

    sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
}
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2257
formats
formats
Definition: signature.h:47
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1873
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:96
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:464
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:697
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:354
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:645
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:357
av_clip
#define av_clip
Definition: common.h:100
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:122
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2460
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:355
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:240
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:205
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:105
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2262
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2173
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1477
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:66
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:439
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:70
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:98
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:610
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:139
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:999
FrameData
Definition: ffmpeg.h:655
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2148
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:160
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:282
OutputFilter::apad
char * apad
Definition: ffmpeg.h:365
out
FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:305
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:979
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:112
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3248
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:377
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:142
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:280
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:644
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2080
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1009
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:643
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2853
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:61
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:977
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:141
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1683
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:257
AVSubtitleRect
Definition: avcodec.h:2230
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2261
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1758
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:995
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:180
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:558
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:708
InputFile::index
int index
Definition: ffmpeg.h:471
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:744
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:522
AVFrame::width
int width
Definition: frame.h:482
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:49
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:56
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:800
AVOption
AVOption.
Definition: opt.h:429
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2495
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:185
FilterGraph::index
int index
Definition: ffmpeg.h:375
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:138
data
const char data[16]
Definition: mxf.c:149
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:184
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:242
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1883
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:378
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2849
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:128
AVDictionary
Definition: dict.c:34
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:690
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:249
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: frame.c:888
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:260
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1601
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:623
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2578
FilterGraphPriv
Definition: ffmpeg_filter.c:45
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:101
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:117
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1918
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:201
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1000
InputStream
Definition: ffmpeg.h:434
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:75
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:69
OutputFilterOptions
Definition: ffmpeg.h:300
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:267
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:288
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3624
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:195
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3010
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:488
fail
#define fail()
Definition: checkasm.h:193
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
OutputFilterPriv::name
char * name
Definition: ffmpeg_filter.c:203
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:327
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:215
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:274
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:613
val
static double val(void *priv, double ch)
Definition: aeval.c:77
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:198
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:819
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1704
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:835
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1899
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:88
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:139
FrameData::tb
AVRational tb
Definition: ffmpeg.h:665
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:227
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:74
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:210
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputFilterPriv::sub2video
struct InputFilterPriv::@8 sub2video
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:185
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:362
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:268
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:821
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:151
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2717
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:216
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:239
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:668
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:144
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2786
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:105
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1260
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:661
duration
int64_t duration
Definition: movenc.c:65
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:334
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:967
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:920
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:263
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:676
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:68
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:2852
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:115
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:379
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:637
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:117
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:731
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:363
InputFilter
Definition: ffmpeg.h:349
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:59
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:297
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2263
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3028
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:270
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:279
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:350
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:279
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:244
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1053
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1195
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:233
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:209
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:281
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3564
AVFormatContext
Format I/O context.
Definition: avformat.h:1300
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:639
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:771
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
FilterGraphPriv::nb_threads
char * nb_threads
Definition: ffmpeg_filter.c:63
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:358
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1295
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:238
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:135
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:895
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:760
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:211
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:852
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:182
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:442
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:150
Decoder
Definition: ffmpeg.h:420
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:863
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:296
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:921
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1204
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:586
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:119
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:910
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:225
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:651
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2440
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:93
mathops.h
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:71
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:672
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1426
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:944
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1224
AVFilterGraph
Definition: avfilter.h:584
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:154
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:266
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:134
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:699
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:380
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:232
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:880
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:450
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:104
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:275
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:46
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:196
FilterGraph
Definition: ffmpeg.h:373
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:933
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1495
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:284
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:741
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:289
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:589
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2142
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:65
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1991
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:261
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3055
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:148
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:401
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:152
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:236
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:86
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:738
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:609
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:223
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:326
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:171
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:136
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2851
FilterCommand::time
double time
Definition: ffmpeg_filter.c:259
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:164
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:151
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1343
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:537
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:57
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:453
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2260
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:102
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:61
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1467
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:187
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1045
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:133
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:488
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1892
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1416
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2850
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2395
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2856
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:285
decoders
Decoder ** decoders
Definition: ffmpeg.c:113
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:200
nb_decoders
int nb_decoders
Definition: ffmpeg.c:114
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:367
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2655
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2213
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2861
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:971
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:367
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:706
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:213
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:260
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:192
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:176
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:544
FPSConvContext
Definition: ffmpeg_filter.c:173
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:110
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:670
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: frame.c:114
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3172
downmix_info.h
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:51
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:290
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:204
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1822
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:656
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:190
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:473
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:650
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:79
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:568
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:623
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:127
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:643
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:659
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:110
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:307
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:212
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:919
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2505
AVFilter
Filter definition.
Definition: avfilter.h:199
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2317
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:168
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1067
mid_pred
#define mid_pred
Definition: mathops.h:96
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:91
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:748
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:203
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:351
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:174
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:487
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1496
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:904
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:243
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2848
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:517
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:1004
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:228
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:482
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:754
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:263
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:384
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:625
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:128
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:930
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:438
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:919
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:146
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:125
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:54
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1445
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:262
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:166
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:930
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2197
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:246
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:234
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:313
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:296
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:143
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:945
AVFilterContext
An instance of a filter.
Definition: avfilter.h:257
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:374
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:354
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2739
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:106
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1522
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:129
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:331
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:235
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:491
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2274
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:370
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:78
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputStream::index
int index
Definition: ffmpeg.h:440
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2439
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:255
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: frame.c:1050
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1060
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:294
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:423
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:131
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:161
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:111
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:343
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:616
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:208
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1313
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:108
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:56
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:240
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:155
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:91
bind_inputs
static int bind_inputs(FilterGraph *fg)
Definition: ffmpeg_filter.c:1398
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:587
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:260
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:698
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:459
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:220
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:733
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:209
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:189
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2995
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:157
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1693
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2259
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:256
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:254
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:133
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3168
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:369
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:76
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:277
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:191