FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 #include "graph/graphprint.h"
25 
26 #include "libavfilter/avfilter.h"
27 #include "libavfilter/buffersink.h"
28 #include "libavfilter/buffersrc.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
34 #include "libavutil/downmix_info.h"
35 #include "libavutil/mem.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/pixfmt.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/time.h"
41 #include "libavutil/timestamp.h"
42 
43 typedef struct FilterGraphPriv {
45 
46  // name used for logging
47  char log_name[32];
48 
49  int is_simple;
50  // true when the filtergraph contains only meta filters
51  // that do not modify the frame data
52  int is_meta;
53  // source filters are present in the graph
56 
57  unsigned nb_outputs_done;
58 
60 
61  // frame for temporarily holding output from the filtergraph
63  // frame for sending output to the encoder
65 
67  unsigned sch_idx;
69 
71 {
72  return (FilterGraphPriv*)fg;
73 }
74 
75 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
76 {
77  return (const FilterGraphPriv*)fg;
78 }
79 
80 // data that is local to the filter thread and not visible outside of it
81 typedef struct FilterGraphThread {
83 
85 
86  // Temporary buffer for output frames, since on filtergraph reset
87  // we cannot send them to encoders immediately.
88  // The output index is stored in frame opaque.
90 
91  // index of the next input to request from the scheduler
92  unsigned next_in;
93  // set to 1 after at least one frame passed through this output
94  int got_frame;
95 
96  // EOF status of each input/output, as received by the thread
97  uint8_t *eof_in;
98  uint8_t *eof_out;
100 
101 typedef struct InputFilterPriv {
103 
105 
106  // used to hold submitted input
108 
109  // For inputs bound to a filtergraph output
111 
112  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
113  // same as type otherwise
115 
116  int eof;
117  int bound;
119  uint64_t nb_dropped;
120 
121  // parameters configured for this input
122  int format;
123 
124  int width, height;
129 
132 
134 
137 
139 
141 
145 
148 
149  struct {
150  AVFrame *frame;
151 
154 
155  /// marks if sub2video_update should force an initialization
156  unsigned int initialize;
157  } sub2video;
159 
161 {
162  return (InputFilterPriv*)ifilter;
163 }
164 
165 typedef struct FPSConvContext {
167  /* number of frames emitted by the video-encoding sync code */
169  /* history of nb_frames_prev, i.e. the number of times the
170  * previous frame was duplicated by vsync code in recent
171  * do_video_out() calls */
173 
174  uint64_t dup_warning;
175 
178 
180 
186 
187 typedef struct OutputFilterPriv {
189 
190  void *log_parent;
191  char log_name[32];
192 
193  int needed;
194 
195  /* desired output stream properties */
196  int format;
197  int width, height;
203 
204  unsigned crop_top;
205  unsigned crop_bottom;
206  unsigned crop_left;
207  unsigned crop_right;
208 
211 
212  // time base in which the output is sent to our downstream
213  // does not need to match the filtersink's timebase
215  // at least one frame with the above timebase was sent
216  // to our downstream, so it cannot change anymore
218 
220 
223 
224  // those are only set if no format is specified and the encoder gives us multiple options
225  // They point directly to the relevant lists of the encoder.
226  union {
227  const enum AVPixelFormat *pix_fmts;
229  };
231  const int *sample_rates;
235 
237 
241  // offset for output timestamps, in AV_TIME_BASE_Q
245 
246  unsigned flags;
248 
250 {
251  return (OutputFilterPriv*)ofilter;
252 }
253 
254 typedef struct FilterCommand {
255  char *target;
256  char *command;
257  char *arg;
258 
259  double time;
261 } FilterCommand;
262 
263 static void filter_command_free(void *opaque, uint8_t *data)
264 {
266 
267  av_freep(&fc->target);
268  av_freep(&fc->command);
269  av_freep(&fc->arg);
270 
271  av_free(data);
272 }
273 
275 {
276  AVFrame *frame = ifp->sub2video.frame;
277  int ret;
278 
280 
281  frame->width = ifp->width;
282  frame->height = ifp->height;
283  frame->format = ifp->format;
284  frame->colorspace = ifp->color_space;
285  frame->color_range = ifp->color_range;
286  frame->alpha_mode = ifp->alpha_mode;
287 
289  if (ret < 0)
290  return ret;
291 
292  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
293 
294  return 0;
295 }
296 
297 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
298  AVSubtitleRect *r)
299 {
300  uint32_t *pal, *dst2;
301  uint8_t *src, *src2;
302  int x, y;
303 
304  if (r->type != SUBTITLE_BITMAP) {
305  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
306  return;
307  }
308  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
309  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
310  r->x, r->y, r->w, r->h, w, h
311  );
312  return;
313  }
314 
315  dst += r->y * dst_linesize + r->x * 4;
316  src = r->data[0];
317  pal = (uint32_t *)r->data[1];
318  for (y = 0; y < r->h; y++) {
319  dst2 = (uint32_t *)dst;
320  src2 = src;
321  for (x = 0; x < r->w; x++)
322  *(dst2++) = pal[*(src2++)];
323  dst += dst_linesize;
324  src += r->linesize[0];
325  }
326 }
327 
329 {
330  AVFrame *frame = ifp->sub2video.frame;
331  int ret;
332 
333  av_assert1(frame->data[0]);
334  ifp->sub2video.last_pts = frame->pts = pts;
338  if (ret != AVERROR_EOF && ret < 0)
340  "Error while add the frame to buffer source(%s).\n",
341  av_err2str(ret));
342 }
343 
344 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
345  const AVSubtitle *sub)
346 {
347  AVFrame *frame = ifp->sub2video.frame;
348  int8_t *dst;
349  int dst_linesize;
350  int num_rects;
351  int64_t pts, end_pts;
352 
353  if (sub) {
354  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
355  AV_TIME_BASE_Q, ifp->time_base);
356  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
357  AV_TIME_BASE_Q, ifp->time_base);
358  num_rects = sub->num_rects;
359  } else {
360  /* If we are initializing the system, utilize current heartbeat
361  PTS as the start time, and show until the following subpicture
362  is received. Otherwise, utilize the previous subpicture's end time
363  as the fall-back value. */
364  pts = ifp->sub2video.initialize ?
365  heartbeat_pts : ifp->sub2video.end_pts;
366  end_pts = INT64_MAX;
367  num_rects = 0;
368  }
369  if (sub2video_get_blank_frame(ifp) < 0) {
371  "Impossible to get a blank canvas.\n");
372  return;
373  }
374  dst = frame->data [0];
375  dst_linesize = frame->linesize[0];
376  for (int i = 0; i < num_rects; i++)
377  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
378  sub2video_push_ref(ifp, pts);
379  ifp->sub2video.end_pts = end_pts;
380  ifp->sub2video.initialize = 0;
381 }
382 
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header.
 *
 * Expands to choose_<name>(OutputFilterPriv *, AVBPrint *), which appends
 * "name=" followed by either the single explicitly configured value
 * (when ofp->var != none) or a '|'-separated dump of every entry in
 * ofp->supported_list, then a ':' separator.  Appends nothing at all when
 * neither a value nor a list is available. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
{ \
    if (ofp->var == none && !ofp->supported_list) \
        return; \
    av_bprintf(bprint, #name "="); \
    if (ofp->var != none) { \
        av_bprintf(bprint, printf_format, get_name(ofp->var)); \
    } else { \
        const type *p; \
        \
        for (p = ofp->supported_list; *p != none; p++) { \
            av_bprintf(bprint, printf_format "|", get_name(*p)); \
        } \
        /* drop the trailing '|' left by the loop */ \
        if (bprint->len > 0) \
            bprint->str[--bprint->len] = '\0'; \
    } \
    av_bprint_chars(bprint, ':', 1); \
}
404 
407 
410 
412  "%d", )
413 
414 DEF_CHOOSE_FORMAT(color_spaces, enum AVColorSpace, color_space, color_spaces,
416 
417 DEF_CHOOSE_FORMAT(color_ranges, enum AVColorRange, color_range, color_ranges,
419 
420 DEF_CHOOSE_FORMAT(alpha_modes, enum AVAlphaMode, alpha_mode, alpha_modes,
422 
423 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
424 {
425  if (av_channel_layout_check(&ofp->ch_layout)) {
426  av_bprintf(bprint, "channel_layouts=");
427  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
428  } else if (ofp->ch_layouts) {
429  const AVChannelLayout *p;
430 
431  av_bprintf(bprint, "channel_layouts=");
432  for (p = ofp->ch_layouts; p->nb_channels; p++) {
434  av_bprintf(bprint, "|");
435  }
436  if (bprint->len > 0)
437  bprint->str[--bprint->len] = '\0';
438  } else
439  return;
440  av_bprint_chars(bprint, ':', 1);
441 }
442 
443 static int read_binary(void *logctx, const char *path,
444  uint8_t **data, int *len)
445 {
446  AVIOContext *io = NULL;
447  int64_t fsize;
448  int ret;
449 
450  *data = NULL;
451  *len = 0;
452 
453  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
454  if (ret < 0) {
455  av_log(logctx, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
456  path, av_err2str(ret));
457  return ret;
458  }
459 
460  fsize = avio_size(io);
461  if (fsize < 0 || fsize > INT_MAX) {
462  av_log(logctx, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
463  ret = AVERROR(EIO);
464  goto fail;
465  }
466 
467  *data = av_malloc(fsize);
468  if (!*data) {
469  ret = AVERROR(ENOMEM);
470  goto fail;
471  }
472 
473  ret = avio_read(io, *data, fsize);
474  if (ret != fsize) {
475  av_log(logctx, AV_LOG_ERROR, "Error reading file %s\n", path);
476  ret = ret < 0 ? ret : AVERROR(EIO);
477  goto fail;
478  }
479 
480  *len = fsize;
481 
482  ret = 0;
483 fail:
484  avio_close(io);
485  if (ret < 0) {
486  av_freep(data);
487  *len = 0;
488  }
489  return ret;
490 }
491 
492 static int filter_opt_apply(void *logctx, AVFilterContext *f,
493  const char *key, const char *val)
494 {
495  const AVOption *o = NULL;
496  int ret;
497 
499  if (ret >= 0)
500  return 0;
501 
502  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
504  if (!o)
505  goto err_apply;
506 
507  // key is a valid option name prefixed with '/'
508  // interpret value as a path from which to load the actual option value
509  key++;
510 
511  if (o->type == AV_OPT_TYPE_BINARY) {
512  uint8_t *data;
513  int len;
514 
515  ret = read_binary(logctx, val, &data, &len);
516  if (ret < 0)
517  goto err_load;
518 
520  av_freep(&data);
521  } else {
522  char *data = read_file_to_string(val);
523  if (!data) {
524  ret = AVERROR(EIO);
525  goto err_load;
526  }
527 
529  av_freep(&data);
530  }
531  if (ret < 0)
532  goto err_apply;
533 
534  return 0;
535 
536 err_apply:
537  av_log(logctx, AV_LOG_ERROR,
538  "Error applying option '%s' to filter '%s': %s\n",
539  key, f->filter->name, av_err2str(ret));
540  return ret;
541 err_load:
542  av_log(logctx, AV_LOG_ERROR,
543  "Error loading value for option '%s' from file '%s'\n",
544  key, val);
545  return ret;
546 }
547 
548 static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
549 {
550  for (size_t i = 0; i < seg->nb_chains; i++) {
551  AVFilterChain *ch = seg->chains[i];
552 
553  for (size_t j = 0; j < ch->nb_filters; j++) {
554  AVFilterParams *p = ch->filters[j];
555  const AVDictionaryEntry *e = NULL;
556 
557  av_assert0(p->filter);
558 
559  while ((e = av_dict_iterate(p->opts, e))) {
560  int ret = filter_opt_apply(logctx, p->filter, e->key, e->value);
561  if (ret < 0)
562  return ret;
563  }
564 
565  av_dict_free(&p->opts);
566  }
567  }
568 
569  return 0;
570 }
571 
572 static int graph_parse(void *logctx,
573  AVFilterGraph *graph, const char *desc,
575  AVBufferRef *hw_device)
576 {
578  int ret;
579 
580  *inputs = NULL;
581  *outputs = NULL;
582 
583  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
584  if (ret < 0)
585  return ret;
586 
588  if (ret < 0)
589  goto fail;
590 
591  if (hw_device) {
592  for (int i = 0; i < graph->nb_filters; i++) {
593  AVFilterContext *f = graph->filters[i];
594 
595  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
596  continue;
597  f->hw_device_ctx = av_buffer_ref(hw_device);
598  if (!f->hw_device_ctx) {
599  ret = AVERROR(ENOMEM);
600  goto fail;
601  }
602  }
603  }
604 
605  ret = graph_opts_apply(logctx, seg);
606  if (ret < 0)
607  goto fail;
608 
610 
611 fail:
613  return ret;
614 }
615 
616 // Filters can be configured only if the formats of all inputs are known.
618 {
619  for (int i = 0; i < fg->nb_inputs; i++) {
621  if (ifp->format < 0)
622  return 0;
623  }
624  return 1;
625 }
626 
627 static int filter_thread(void *arg);
628 
629 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
630 {
631  AVFilterContext *ctx = inout->filter_ctx;
632  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
633  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
634 
635  if (nb_pads > 1)
636  return av_strdup(ctx->filter->name);
637  return av_asprintf("%s:%s", ctx->filter->name,
638  avfilter_pad_get_name(pads, inout->pad_idx));
639 }
640 
641 static const char *ofilter_item_name(void *obj)
642 {
643  OutputFilterPriv *ofp = obj;
644  return ofp->log_name;
645 }
646 
647 static const AVClass ofilter_class = {
648  .class_name = "OutputFilter",
649  .version = LIBAVUTIL_VERSION_INT,
650  .item_name = ofilter_item_name,
651  .parent_log_context_offset = offsetof(OutputFilterPriv, log_parent),
652  .category = AV_CLASS_CATEGORY_FILTER,
653 };
654 
656 {
657  OutputFilterPriv *ofp;
658  OutputFilter *ofilter;
659 
660  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
661  if (!ofp)
662  return NULL;
663 
664  ofilter = &ofp->ofilter;
665  ofilter->class = &ofilter_class;
666  ofp->log_parent = fg;
667  ofilter->graph = fg;
668  ofilter->type = type;
669  ofp->format = -1;
673  ofilter->index = fg->nb_outputs - 1;
674 
675  snprintf(ofp->log_name, sizeof(ofp->log_name), "%co%d",
676  av_get_media_type_string(type)[0], ofilter->index);
677 
678  return ofilter;
679 }
680 
681 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist,
682  const ViewSpecifier *vs)
683 {
684  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
685  FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
687  int ret;
688 
689  av_assert0(!ifp->bound);
690  ifp->bound = 1;
691 
692  if (ifilter->type != ist->par->codec_type &&
693  !(ifilter->type == AVMEDIA_TYPE_VIDEO && ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
694  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
696  return AVERROR(EINVAL);
697  }
698 
699  ifp->type_src = ist->st->codecpar->codec_type;
700 
701  ifp->opts.fallback = av_frame_alloc();
702  if (!ifp->opts.fallback)
703  return AVERROR(ENOMEM);
704 
705  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
706  vs, &ifp->opts, &src);
707  if (ret < 0)
708  return ret;
709 
710  ifilter->input_name = av_strdup(ifp->opts.name);
711  if (!ifilter->input_name)
712  return AVERROR(EINVAL);
713 
714  ret = sch_connect(fgp->sch,
715  src, SCH_FILTER_IN(fgp->sch_idx, ifilter->index));
716  if (ret < 0)
717  return ret;
718 
719  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
720  ifp->sub2video.frame = av_frame_alloc();
721  if (!ifp->sub2video.frame)
722  return AVERROR(ENOMEM);
723 
724  ifp->width = ifp->opts.sub2video_width;
725  ifp->height = ifp->opts.sub2video_height;
726 
727  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
728  palettes for all rectangles are identical or compatible */
729  ifp->format = AV_PIX_FMT_RGB32;
730 
731  ifp->time_base = AV_TIME_BASE_Q;
732 
733  av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
734  ifp->width, ifp->height);
735  }
736 
737  return 0;
738 }
739 
741  const ViewSpecifier *vs)
742 {
745  int ret;
746 
747  av_assert0(!ifp->bound);
748  ifp->bound = 1;
749 
750  if (ifp->ifilter.type != dec->type) {
751  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
753  return AVERROR(EINVAL);
754  }
755 
756  ifp->type_src = ifp->ifilter.type;
757 
758  ret = dec_filter_add(dec, &ifp->ifilter, &ifp->opts, vs, &src);
759  if (ret < 0)
760  return ret;
761 
762  ifp->ifilter.input_name = av_strdup(ifp->opts.name);
763  if (!ifp->ifilter.input_name)
764  return AVERROR(EINVAL);
765 
766  ret = sch_connect(fgp->sch, src, SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
767  if (ret < 0)
768  return ret;
769 
770  return 0;
771 }
772 
773 static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed,
774  const AVChannelLayout *layout_requested)
775 {
776  int i, err;
777 
778  if (layout_requested->order != AV_CHANNEL_ORDER_UNSPEC) {
779  /* Pass the layout through for all orders but UNSPEC */
780  err = av_channel_layout_copy(&f->ch_layout, layout_requested);
781  if (err < 0)
782  return err;
783  return 0;
784  }
785 
786  /* Requested layout is of order UNSPEC */
787  if (!layouts_allowed) {
788  /* Use the default native layout for the requested amount of channels when the
789  encoder doesn't have a list of supported layouts */
790  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
791  return 0;
792  }
793  /* Encoder has a list of supported layouts. Pick the first layout in it with the
794  same amount of channels as the requested layout */
795  for (i = 0; layouts_allowed[i].nb_channels; i++) {
796  if (layouts_allowed[i].nb_channels == layout_requested->nb_channels)
797  break;
798  }
799  if (layouts_allowed[i].nb_channels) {
800  /* Use it if one is found */
801  err = av_channel_layout_copy(&f->ch_layout, &layouts_allowed[i]);
802  if (err < 0)
803  return err;
804  return 0;
805  }
806  /* If no layout for the amount of channels requested was found, use the default
807  native layout for it. */
808  av_channel_layout_default(&f->ch_layout, layout_requested->nb_channels);
809 
810  return 0;
811 }
812 
813 int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc,
814  const OutputFilterOptions *opts)
815 {
816  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
817  FilterGraph *fg = ofilter->graph;
818  FilterGraphPriv *fgp = fgp_from_fg(fg);
819  int ret;
820 
821  av_assert0(!ofilter->bound);
822  av_assert0(!opts->enc ||
823  ofilter->type == opts->enc->type);
824 
825  ofp->needed = ofilter->bound = 1;
826  av_freep(&ofilter->linklabel);
827 
828  ofp->flags |= opts->flags;
829  ofp->ts_offset = opts->ts_offset;
830  ofp->enc_timebase = opts->output_tb;
831 
832  ofp->trim_start_us = opts->trim_start_us;
833  ofp->trim_duration_us = opts->trim_duration_us;
834 
835  ofilter->output_name = av_strdup(opts->name);
836  if (!ofilter->output_name)
837  return AVERROR(EINVAL);
838 
839  ret = av_dict_copy(&ofp->sws_opts, opts->sws_opts, 0);
840  if (ret < 0)
841  return ret;
842 
843  ret = av_dict_copy(&ofp->swr_opts, opts->swr_opts, 0);
844  if (ret < 0)
845  return ret;
846 
847  if (opts->flags & OFILTER_FLAG_AUDIO_24BIT)
848  av_dict_set(&ofp->swr_opts, "output_sample_bits", "24", 0);
849 
850  if (fgp->is_simple) {
851  // for simple filtergraph there is just one output,
852  // so use only graph-level information for logging
853  ofp->log_parent = NULL;
854  av_strlcpy(ofp->log_name, fgp->log_name, sizeof(ofp->log_name));
855  } else
856  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
857 
858  switch (ofilter->type) {
859  case AVMEDIA_TYPE_VIDEO:
860  ofp->width = opts->width;
861  ofp->height = opts->height;
862  if (opts->format != AV_PIX_FMT_NONE) {
863  ofp->format = opts->format;
864  } else
865  ofp->pix_fmts = opts->pix_fmts;
866 
867  if (opts->color_space != AVCOL_SPC_UNSPECIFIED)
868  ofp->color_space = opts->color_space;
869  else
870  ofp->color_spaces = opts->color_spaces;
871 
872  if (opts->color_range != AVCOL_RANGE_UNSPECIFIED)
873  ofp->color_range = opts->color_range;
874  else
875  ofp->color_ranges = opts->color_ranges;
876 
877  if (opts->alpha_mode != AVALPHA_MODE_UNSPECIFIED)
878  ofp->alpha_mode = opts->alpha_mode;
879  else
880  ofp->alpha_modes = opts->alpha_modes;
881 
883 
884  ofp->fps.last_frame = av_frame_alloc();
885  if (!ofp->fps.last_frame)
886  return AVERROR(ENOMEM);
887 
888  ofp->fps.vsync_method = opts->vsync_method;
889  ofp->fps.framerate = opts->frame_rate;
890  ofp->fps.framerate_max = opts->max_frame_rate;
891  ofp->fps.framerate_supported = opts->frame_rates;
892 
893  // reduce frame rate for mpeg4 to be within the spec limits
894  if (opts->enc && opts->enc->id == AV_CODEC_ID_MPEG4)
895  ofp->fps.framerate_clip = 65535;
896 
897  ofp->fps.dup_warning = 1000;
898 
899  break;
900  case AVMEDIA_TYPE_AUDIO:
901  if (opts->format != AV_SAMPLE_FMT_NONE) {
902  ofp->format = opts->format;
903  } else {
904  ofp->sample_fmts = opts->sample_fmts;
905  }
906  if (opts->sample_rate) {
907  ofp->sample_rate = opts->sample_rate;
908  } else
909  ofp->sample_rates = opts->sample_rates;
910  if (opts->ch_layout.nb_channels) {
911  int ret = set_channel_layout(ofp, opts->ch_layouts, &opts->ch_layout);
912  if (ret < 0)
913  return ret;
914  } else {
915  ofp->ch_layouts = opts->ch_layouts;
916  }
917  break;
918  }
919 
920  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofilter->index),
921  SCH_ENC(sched_idx_enc));
922  if (ret < 0)
923  return ret;
924 
925  return 0;
926 }
927 
929  const OutputFilterOptions *opts)
930 {
931  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
932 
933  av_assert0(!ofilter->bound);
934  av_assert0(ofilter->type == ifp->ifilter.type);
935 
936  ofp->needed = ofilter->bound = 1;
937  av_freep(&ofilter->linklabel);
938 
939  ofilter->output_name = av_strdup(opts->name);
940  if (!ofilter->output_name)
941  return AVERROR(EINVAL);
942 
943  ifp->ofilter_src = ofilter;
944 
945  av_strlcatf(ofp->log_name, sizeof(ofp->log_name), "->%s", ofilter->output_name);
946 
947  return 0;
948 }
949 
950 static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
951 {
953  OutputFilter *ofilter_src = fg_src->outputs[out_idx];
955  char name[32];
956  int ret;
957 
958  av_assert0(!ifp->bound);
959  ifp->bound = 1;
960 
961  if (ifp->ifilter.type != ofilter_src->type) {
962  av_log(fgp, AV_LOG_ERROR, "Tried to connect %s output to %s input\n",
963  av_get_media_type_string(ofilter_src->type),
965  return AVERROR(EINVAL);
966  }
967 
968  ifp->type_src = ifp->ifilter.type;
969 
970  memset(&opts, 0, sizeof(opts));
971 
972  snprintf(name, sizeof(name), "fg:%d:%d", fgp->fg.index, ifp->ifilter.index);
973  opts.name = name;
974 
975  ret = ofilter_bind_ifilter(ofilter_src, ifp, &opts);
976  if (ret < 0)
977  return ret;
978 
979  ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fg_src->index, out_idx),
980  SCH_FILTER_IN(fgp->sch_idx, ifp->ifilter.index));
981  if (ret < 0)
982  return ret;
983 
984  return 0;
985 }
986 
988 {
989  InputFilterPriv *ifp;
990  InputFilter *ifilter;
991 
992  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
993  if (!ifp)
994  return NULL;
995 
996  ifilter = &ifp->ifilter;
997  ifilter->graph = fg;
998 
999  ifp->frame = av_frame_alloc();
1000  if (!ifp->frame)
1001  return NULL;
1002 
1003  ifilter->index = fg->nb_inputs - 1;
1004  ifp->format = -1;
1008 
1010  if (!ifp->frame_queue)
1011  return NULL;
1012 
1013  return ifilter;
1014 }
1015 
1017 {
1018  FilterGraph *fg = *pfg;
1019  FilterGraphPriv *fgp;
1020 
1021  if (!fg)
1022  return;
1023  fgp = fgp_from_fg(fg);
1024 
1025  for (int j = 0; j < fg->nb_inputs; j++) {
1026  InputFilter *ifilter = fg->inputs[j];
1027  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1028 
1029  if (ifp->frame_queue) {
1030  AVFrame *frame;
1031  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
1032  av_frame_free(&frame);
1033  av_fifo_freep2(&ifp->frame_queue);
1034  }
1035  av_frame_free(&ifp->sub2video.frame);
1036 
1037  av_frame_free(&ifp->frame);
1038  av_frame_free(&ifp->opts.fallback);
1039 
1041  av_freep(&ifilter->linklabel);
1042  av_freep(&ifp->opts.name);
1044  av_freep(&ifilter->name);
1045  av_freep(&ifilter->input_name);
1046  av_freep(&fg->inputs[j]);
1047  }
1048  av_freep(&fg->inputs);
1049  for (int j = 0; j < fg->nb_outputs; j++) {
1050  OutputFilter *ofilter = fg->outputs[j];
1051  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1052 
1053  av_frame_free(&ofp->fps.last_frame);
1054  av_dict_free(&ofp->sws_opts);
1055  av_dict_free(&ofp->swr_opts);
1056 
1057  av_freep(&ofilter->linklabel);
1058  av_freep(&ofilter->name);
1059  av_freep(&ofilter->output_name);
1060  av_freep(&ofilter->apad);
1063  av_freep(&fg->outputs[j]);
1064  }
1065  av_freep(&fg->outputs);
1066  av_freep(&fg->graph_desc);
1067 
1068  av_frame_free(&fgp->frame);
1069  av_frame_free(&fgp->frame_enc);
1070 
1071  av_freep(pfg);
1072 }
1073 
1074 static const char *fg_item_name(void *obj)
1075 {
1076  const FilterGraphPriv *fgp = obj;
1077 
1078  return fgp->log_name;
1079 }
1080 
1081 static const AVClass fg_class = {
1082  .class_name = "FilterGraph",
1083  .version = LIBAVUTIL_VERSION_INT,
1084  .item_name = fg_item_name,
1085  .category = AV_CLASS_CATEGORY_FILTER,
1086 };
1087 
1088 int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch,
1089  const OutputFilterOptions *opts)
1090 {
1091  FilterGraphPriv *fgp;
1092  FilterGraph *fg;
1093 
1095  AVFilterGraph *graph;
1096  int ret = 0;
1097 
1098  fgp = av_mallocz(sizeof(*fgp));
1099  if (!fgp) {
1100  av_freep(graph_desc);
1101  return AVERROR(ENOMEM);
1102  }
1103  fg = &fgp->fg;
1104 
1105  if (pfg) {
1106  *pfg = fg;
1107  fg->index = -1;
1108  } else {
1110  if (ret < 0) {
1111  av_freep(graph_desc);
1112  av_freep(&fgp);
1113  return ret;
1114  }
1115 
1116  fg->index = nb_filtergraphs - 1;
1117  }
1118 
1119  fg->class = &fg_class;
1120  fg->graph_desc = *graph_desc;
1122  fgp->nb_threads = -1;
1123  fgp->sch = sch;
1124 
1125  *graph_desc = NULL;
1126 
1127  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
1128 
1129  fgp->frame = av_frame_alloc();
1130  fgp->frame_enc = av_frame_alloc();
1131  if (!fgp->frame || !fgp->frame_enc)
1132  return AVERROR(ENOMEM);
1133 
1134  /* this graph is only used for determining the kinds of inputs
1135  * and outputs we have, and is discarded on exit from this function */
1136  graph = avfilter_graph_alloc();
1137  if (!graph)
1138  return AVERROR(ENOMEM);;
1139  graph->nb_threads = 1;
1140 
1141  ret = graph_parse(fg, graph, fg->graph_desc, &inputs, &outputs,
1143  if (ret < 0)
1144  goto fail;
1145 
1146  for (unsigned i = 0; i < graph->nb_filters; i++) {
1147  const AVFilter *f = graph->filters[i]->filter;
1148  if ((!avfilter_filter_pad_count(f, 0) &&
1149  !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
1150  !strcmp(f->name, "apad")) {
1151  fgp->have_sources = 1;
1152  break;
1153  }
1154  }
1155 
1156  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
1157  InputFilter *const ifilter = ifilter_alloc(fg);
1158 
1159  if (!ifilter) {
1160  ret = AVERROR(ENOMEM);
1161  goto fail;
1162  }
1163 
1164  ifilter->linklabel = cur->name;
1165  cur->name = NULL;
1166 
1167  ifilter->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
1168  cur->pad_idx);
1169 
1170  if (ifilter->type != AVMEDIA_TYPE_VIDEO && ifilter->type != AVMEDIA_TYPE_AUDIO) {
1171  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1172  "currently.\n");
1173  ret = AVERROR(ENOSYS);
1174  goto fail;
1175  }
1176 
1177  ifilter->name = describe_filter_link(fg, cur, 1);
1178  if (!ifilter->name) {
1179  ret = AVERROR(ENOMEM);
1180  goto fail;
1181  }
1182  }
1183 
1184  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
1185  const enum AVMediaType type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
1186  cur->pad_idx);
1187  OutputFilter *const ofilter = ofilter_alloc(fg, type);
1188  OutputFilterPriv *ofp;
1189 
1190  if (!ofilter) {
1191  ret = AVERROR(ENOMEM);
1192  goto fail;
1193  }
1194  ofp = ofp_from_ofilter(ofilter);
1195 
1196  ofilter->linklabel = cur->name;
1197  cur->name = NULL;
1198 
1199  ofilter->name = describe_filter_link(fg, cur, 0);
1200  if (!ofilter->name) {
1201  ret = AVERROR(ENOMEM);
1202  goto fail;
1203  }
1204 
1205  // opts should only be needed in this function to fill fields from filtergraphs
1206  // whose output is meant to be treated as if it was stream, e.g. merged HEIF
1207  // tile groups.
1208  if (opts) {
1209  ofp->flags = opts->flags;
1210  ofp->side_data = opts->side_data;
1211  ofp->nb_side_data = opts->nb_side_data;
1212 
1213  ofp->crop_top = opts->crop_top;
1214  ofp->crop_bottom = opts->crop_bottom;
1215  ofp->crop_left = opts->crop_left;
1216  ofp->crop_right = opts->crop_right;
1217 
1220  if (sd)
1221  memcpy(ofp->displaymatrix, sd->data, sizeof(ofp->displaymatrix));
1222  }
1223  }
1224 
1225  if (!fg->nb_outputs) {
1226  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
1227  ret = AVERROR(ENOSYS);
1228  goto fail;
1229  }
1230 
1231  ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
1232  filter_thread, fgp);
1233  if (ret < 0)
1234  goto fail;
1235  fgp->sch_idx = ret;
1236 
1237 fail:
1240  avfilter_graph_free(&graph);
1241 
1242  if (ret < 0)
1243  return ret;
1244 
1245  return 0;
1246 }
1247 
1249  InputStream *ist,
1250  char **graph_desc,
1251  Scheduler *sch, unsigned sched_idx_enc,
1252  const OutputFilterOptions *opts)
1253 {
1254  const enum AVMediaType type = ist->par->codec_type;
1255  FilterGraph *fg;
1256  FilterGraphPriv *fgp;
1257  int ret;
1258 
1259  ret = fg_create(pfg, graph_desc, sch, NULL);
1260  if (ret < 0)
1261  return ret;
1262  fg = *pfg;
1263  fgp = fgp_from_fg(fg);
1264 
1265  fgp->is_simple = 1;
1266 
1267  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf%s",
1268  av_get_media_type_string(type)[0], opts->name);
1269 
1270  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
1271  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
1272  "to have exactly 1 input and 1 output. "
1273  "However, it had %d input(s) and %d output(s). Please adjust, "
1274  "or use a complex filtergraph (-filter_complex) instead.\n",
1275  *graph_desc, fg->nb_inputs, fg->nb_outputs);
1276  return AVERROR(EINVAL);
1277  }
1278  if (fg->outputs[0]->type != type) {
1279  av_log(fg, AV_LOG_ERROR, "Filtergraph has a %s output, cannot connect "
1280  "it to %s output stream\n",
1283  return AVERROR(EINVAL);
1284  }
1285 
1286  ret = ifilter_bind_ist(fg->inputs[0], ist, opts->vs);
1287  if (ret < 0)
1288  return ret;
1289 
1290  ret = ofilter_bind_enc(fg->outputs[0], sched_idx_enc, opts);
1291  if (ret < 0)
1292  return ret;
1293 
1294  if (opts->nb_threads >= 0)
1295  fgp->nb_threads = opts->nb_threads;
1296 
1297  return 0;
1298 }
1299 
 /* Bind one complex-filtergraph input to its data source. Three cases, in
  * order: a standalone decoder (link label "dec:<idx>"), another
  * filtergraph's output with a matching link label or an explicitly
  * addressed demuxer stream, or — for unlabeled inputs — any unbound
  * filtergraph output or unused input stream.
  * When commit=0 this is a dry run that only marks matching filtergraph
  * outputs as needed; when commit=1 the binding is actually performed. */
1300 static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
1301 {
1302  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1303  InputStream *ist = NULL;
1304  enum AVMediaType type = ifilter->type;
1306  const char *spec;
1307  char *p;
1308  int i, ret;
1310  if (ifilter->linklabel && !strncmp(ifilter->linklabel, "dec:", 4)) {
1311  // bind to a standalone decoder
1312  int dec_idx;
1313 
1314  dec_idx = strtol(ifilter->linklabel + 4, &p, 0);
1315  if (dec_idx < 0 || dec_idx >= nb_decoders) {
1316  av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
1317  dec_idx, fg->graph_desc);
1318  return AVERROR(EINVAL);
1319  }
1320 
  /* an optional view specifier may follow the decoder index */
1321  if (type == AVMEDIA_TYPE_VIDEO) {
1322  spec = *p == ':' ? p + 1 : p;
1323  ret = view_specifier_parse(&spec, &vs);
1324  if (ret < 0)
1325  return ret;
1326  }
1327 
1328  ret = ifilter_bind_dec(ifp, decoders[dec_idx], &vs);
1329  if (ret < 0)
1330  av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
1331  ifilter->name);
1332  return ret;
1333  } else if (ifilter->linklabel) {
1335  AVFormatContext *s;
1336  AVStream *st = NULL;
1337  int file_idx;
1338 
1339  // try finding an unbound filtergraph output with this label
1340  for (int i = 0; i < nb_filtergraphs; i++) {
1341  FilterGraph *fg_src = filtergraphs[i];
1342 
1343  if (fg == fg_src)
1344  continue;
1345 
1346  for (int j = 0; j < fg_src->nb_outputs; j++) {
1347  OutputFilter *ofilter = fg_src->outputs[j];
1348 
1349  if (!ofilter->bound && ofilter->linklabel &&
1350  !strcmp(ofilter->linklabel, ifilter->linklabel)) {
1351  if (commit) {
1352  av_log(fg, AV_LOG_VERBOSE,
1353  "Binding input with label '%s' to filtergraph output %d:%d\n",
1354  ifilter->linklabel, i, j);
1355 
1356  ret = ifilter_bind_fg(ifp, fg_src, j);
1357  if (ret < 0) {
1358  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %s\n",
1359  ifilter->linklabel);
1360  return ret;
1361  }
1362  } else
  /* dry run: only record that this output has a consumer */
1363  ofp_from_ofilter(ofilter)->needed = 1;
1364  return 0;
1365  }
1366  }
1367  }
1368 
1369  // bind to an explicitly specified demuxer stream
1370  file_idx = strtol(ifilter->linklabel, &p, 0);
1371  if (file_idx < 0 || file_idx >= nb_input_files) {
1372  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1373  file_idx, fg->graph_desc);
1374  return AVERROR(EINVAL);
1375  }
1376  s = input_files[file_idx]->ctx;
1377 
1378  ret = stream_specifier_parse(&ss, *p == ':' ? p + 1 : p, 1, fg);
1379  if (ret < 0) {
1380  av_log(fg, AV_LOG_ERROR, "Invalid stream specifier: %s\n", p);
1381  return ret;
1382  }
1383 
1384  if (type == AVMEDIA_TYPE_VIDEO) {
1385  spec = ss.remainder ? ss.remainder : "";
1386  ret = view_specifier_parse(&spec, &vs);
1387  if (ret < 0) {
1389  return ret;
1390  }
1391  }
1392 
  /* find the first stream in the file matching both type and specifier;
   * subtitle streams are accepted for video inputs (sub2video) */
1393  for (i = 0; i < s->nb_streams; i++) {
1394  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1395  if (stream_type != type &&
1396  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1397  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1398  continue;
1399  if (stream_specifier_match(&ss, s, s->streams[i], fg)) {
1400  st = s->streams[i];
1401  break;
1402  }
1403  }
1405  if (!st) {
1406  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1407  "matches no streams.\n", p, fg->graph_desc);
1408  return AVERROR(EINVAL);
1409  }
1410  ist = input_files[file_idx]->streams[st->index];
1411 
1412  if (commit)
1413  av_log(fg, AV_LOG_VERBOSE,
1414  "Binding input with label '%s' to input stream %d:%d\n",
1415  ifilter->linklabel, ist->file->index, ist->index);
1416  } else {
1417  // try finding an unbound filtergraph output
1418  for (int i = 0; i < nb_filtergraphs; i++) {
1419  FilterGraph *fg_src = filtergraphs[i];
1420 
1421  if (fg == fg_src)
1422  continue;
1423 
1424  for (int j = 0; j < fg_src->nb_outputs; j++) {
1425  OutputFilter *ofilter = fg_src->outputs[j];
1426 
1427  if (!ofilter->bound) {
1428  if (commit) {
1429  av_log(fg, AV_LOG_VERBOSE,
1430  "Binding unlabeled filtergraph input to filtergraph output %d:%d\n", i, j);
1431 
1432  ret = ifilter_bind_fg(ifp, fg_src, j);
1433  if (ret < 0) {
1434  av_log(fg, AV_LOG_ERROR, "Error binding filtergraph input %d:%d\n", i, j);
1435  return ret;
1436  }
1437  } else
1438  ofp_from_ofilter(ofilter)->needed = 1;
1439  return 0;
1440  }
1441  }
1442  }
1443 
  /* no filtergraph output available — fall back to an unused stream */
1444  ist = ist_find_unused(type);
1445  if (!ist) {
1446  av_log(fg, AV_LOG_FATAL,
1447  "Cannot find an unused %s input stream to feed the "
1448  "unlabeled input pad %s.\n",
1449  av_get_media_type_string(type), ifilter->name);
1450  return AVERROR(EINVAL);
1451  }
1452 
1453  if (commit)
1454  av_log(fg, AV_LOG_VERBOSE,
1455  "Binding unlabeled input %d to input stream %d:%d\n",
1456  ifilter->index, ist->file->index, ist->index);
1457  }
1458  av_assert0(ist);
1459 
1460  if (commit) {
1461  ret = ifilter_bind_ist(ifilter, ist, &vs);
1462  if (ret < 0) {
1463  av_log(fg, AV_LOG_ERROR,
1464  "Error binding an input stream to complex filtergraph input %s.\n",
1465  ifilter->name);
1466  return ret;
1467  }
1468  }
1469 
1470  return 0;
1471 }
1472 
 /* Bind every not-yet-bound input of fg to its source; commit is forwarded
  * to fg_complex_bind_input (0 = dry run, 1 = perform the binding).
  * Returns 0 on success, a negative AVERROR on failure. */
1473 static int bind_inputs(FilterGraph *fg, int commit)
1474 {
1475  // bind filtergraph inputs to input streams or other filtergraphs
1476  for (int i = 0; i < fg->nb_inputs; i++) {
1478  int ret;
1479 
  /* already bound on a previous pass — skip */
1480  if (ifp->bound)
1481  continue;
1482 
1483  ret = fg_complex_bind_input(fg, &ifp->ifilter, commit);
1484  if (ret < 0)
1485  return ret;
1486  }
1487 
1488  return 0;
1489 }
1490 
1492 {
  /* Finalise all filtergraph bindings in three passes:
   * 1) dry-run bind to mark which outputs actually have consumers,
   * 2) drop internal graphs whose outputs stayed unconnected (fatal error
   *    for user-created graphs),
   * 3) commit the bindings for the surviving graphs. */
1493  int ret;
1494 
1495  for (int i = 0; i < nb_filtergraphs; i++) {
1496  ret = bind_inputs(filtergraphs[i], 0);
1497  if (ret < 0)
1498  return ret;
1499  }
1500 
1501  // check that all outputs were bound
  /* iterate backwards because entries may be removed from filtergraphs[] */
1502  for (int i = nb_filtergraphs - 1; i >= 0; i--) {
1503  FilterGraph *fg = filtergraphs[i];
1505 
1506  for (int j = 0; j < fg->nb_outputs; j++) {
1507  OutputFilter *output = fg->outputs[j];
1508  if (!ofp_from_ofilter(output)->needed) {
1509  if (!fg->is_internal) {
1510  av_log(fg, AV_LOG_FATAL,
1511  "Filter '%s' has output %d (%s) unconnected\n",
1512  output->name, j,
1513  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1514  return AVERROR(EINVAL);
1515  }
1516 
  /* internal graph with a dangling output: remove it entirely,
   * both from the scheduler and from the global array */
1517  av_log(fg, AV_LOG_DEBUG,
1518  "Internal filter '%s' has output %d (%s) unconnected. Removing graph\n",
1519  output->name, j,
1520  output->linklabel ? (const char *)output->linklabel : "unlabeled");
1521  sch_remove_filtergraph(fgp->sch, fgp->sch_idx);
1522  fg_free(&filtergraphs[i]);
1523  nb_filtergraphs--;
1524  if (nb_filtergraphs > 0)
1525  memmove(&filtergraphs[i],
1526  &filtergraphs[i + 1],
1527  (nb_filtergraphs - i) * sizeof(*filtergraphs));
1528  break;
1529  }
1530  }
1531  }
1532 
1533  for (int i = 0; i < nb_filtergraphs; i++) {
1534  ret = bind_inputs(filtergraphs[i], 1);
1535  if (ret < 0)
1536  return ret;
1537  }
1538 
1539  return 0;
1540 }
1541 
 /* Append a trim (video) or atrim (audio) filter after *last_filter to
  * enforce -ss/-t style limits. No-op when neither start_time nor duration
  * is set. On success *last_filter/*pad_idx are updated to point at the
  * newly inserted filter's output. */
1542 static int insert_trim(void *logctx, int64_t start_time, int64_t duration,
1543  AVFilterContext **last_filter, int *pad_idx,
1544  const char *filter_name)
1545 {
1546  AVFilterGraph *graph = (*last_filter)->graph;
1548  const AVFilter *trim;
  /* pick trim vs atrim based on the media type of the upstream pad */
1549  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1550  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1551  int ret = 0;
1552 
1553  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1554  return 0;
1555 
1556  trim = avfilter_get_by_name(name);
1557  if (!trim) {
1558  av_log(logctx, AV_LOG_ERROR, "%s filter not present, cannot limit "
1559  "recording time.\n", name);
1560  return AVERROR_FILTER_NOT_FOUND;
1561  }
1562 
1563  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1564  if (!ctx)
1565  return AVERROR(ENOMEM);
1566 
  /* "durationi"/"starti" are the integer (microsecond) variants of the
   * trim/atrim duration/start options */
1567  if (duration != INT64_MAX) {
1568  ret = av_opt_set_int(ctx, "durationi", duration,
1570  }
1571  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1572  ret = av_opt_set_int(ctx, "starti", start_time,
1574  }
1575  if (ret < 0) {
1576  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1577  return ret;
1578  }
1579 
1581  if (ret < 0)
1582  return ret;
1583 
1584  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1585  if (ret < 0)
1586  return ret;
1587 
1588  *last_filter = ctx;
1589  *pad_idx = 0;
1590  return 0;
1591 }
1592 
 /* Create a filter by name with the given args, link it after *last_filter
  * and make it the new chain tail (*last_filter/*pad_idx updated).
  * Returns 0 on success, a negative AVERROR on failure. */
1593 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1594  const char *filter_name, const char *args)
1595 {
1596  AVFilterGraph *graph = (*last_filter)->graph;
1597  const AVFilter *filter = avfilter_get_by_name(filter_name);
1599  int ret;
1600 
  /* callers only pass names of filters known to exist, so a miss is a bug */
1601  if (!filter)
1602  return AVERROR_BUG;
1603 
1605  filter,
1606  filter_name, args, NULL, graph);
1607  if (ret < 0)
1608  return ret;
1609 
1610  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1611  if (ret < 0)
1612  return ret;
1613 
1614  *last_filter = ctx;
1615  *pad_idx = 0;
1616  return 0;
1617 }
1618 
1620  OutputFilter *ofilter, AVFilterInOut *out)
1621 {
  /* Terminate a video output: attach a buffersink and insert, as needed,
   * crop / autorotate / scale / format / trim filters between the graph's
   * output pad and the sink. */
1622  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1623  AVFilterContext *last_filter = out->filter_ctx;
1624  AVBPrint bprint;
1625  int pad_idx = out->pad_idx;
1626  int ret;
1627  char name[255];
1628 
1629  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1631  avfilter_get_by_name("buffersink"),
1632  name, NULL, NULL, graph);
1633 
1634  if (ret < 0)
1635  return ret;
1636 
  /* user-requested output cropping */
1637  if (ofp->flags & OFILTER_FLAG_CROP) {
1638  char crop_buf[64];
1639  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1640  ofp->crop_left, ofp->crop_right,
1641  ofp->crop_top, ofp->crop_bottom,
1642  ofp->crop_left, ofp->crop_top);
1643  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1644  if (ret < 0)
1645  return ret;
1646  }
1647 
  /* apply the display matrix by inserting transpose/hflip/vflip/rotate */
1648  if (ofp->flags & OFILTER_FLAG_AUTOROTATE) {
1649  int32_t *displaymatrix = ofp->displaymatrix;
1650  double theta;
1651 
1652  theta = get_rotation(displaymatrix);
1653 
1654  if (fabs(theta - 90) < 1.0) {
1655  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1656  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1657  } else if (fabs(theta - 180) < 1.0) {
1658  if (displaymatrix[0] < 0) {
1659  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1660  if (ret < 0)
1661  return ret;
1662  }
1663  if (displaymatrix[4] < 0) {
1664  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1665  }
1666  } else if (fabs(theta - 270) < 1.0) {
1667  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1668  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1669  } else if (fabs(theta) > 1.0) {
1670  char rotate_buf[64];
1671  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1672  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1673  } else if (fabs(theta) < 1.0) {
1674  if (displaymatrix && displaymatrix[4] < 0) {
1675  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1676  }
1677  }
1678  if (ret < 0)
1679  return ret;
1680 
1682  }
1683 
1684  if ((ofp->width || ofp->height) && (ofp->flags & OFILTER_FLAG_AUTOSCALE) &&
1685  // skip add scale for hardware format
1686  !(ofp->format != AV_PIX_FMT_NONE &&
1688  char args[255];
1690  const AVDictionaryEntry *e = NULL;
1691 
1692  snprintf(args, sizeof(args), "%d:%d",
1693  ofp->width, ofp->height);
1694 
  /* append per-output sws options to the scaler args */
1695  while ((e = av_dict_iterate(ofp->sws_opts, e))) {
1696  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1697  }
1698 
1699  snprintf(name, sizeof(name), "scaler_out_%s", ofilter->output_name);
1701  name, args, NULL, graph)) < 0)
1702  return ret;
1703  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1704  return ret;
1705 
1706  last_filter = filter;
1707  pad_idx = 0;
1708  }
1709 
  /* constrain pixel format / colorspace / range / alpha via a format filter */
1711  ofp->format != AV_PIX_FMT_NONE || !ofp->pix_fmts);
1713  choose_pix_fmts(ofp, &bprint);
1714  choose_color_spaces(ofp, &bprint);
1715  choose_color_ranges(ofp, &bprint);
1716  choose_alpha_modes(ofp, &bprint);
1717  if (!av_bprint_is_complete(&bprint))
1718  return AVERROR(ENOMEM);
1719 
1720  if (bprint.len) {
1722 
1724  avfilter_get_by_name("format"),
1725  "format", bprint.str, NULL, graph);
1726  av_bprint_finalize(&bprint, NULL);
1727  if (ret < 0)
1728  return ret;
1729  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1730  return ret;
1731 
1732  last_filter = filter;
1733  pad_idx = 0;
1734  }
1735 
1736  snprintf(name, sizeof(name), "trim_out_%s", ofilter->output_name);
1737  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1738  &last_filter, &pad_idx, name);
1739  if (ret < 0)
1740  return ret;
1741 
1742 
1743  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1744  return ret;
1745 
1746  return 0;
1747 }
1748 
1750  OutputFilter *ofilter, AVFilterInOut *out)
1751 {
  /* Terminate an audio output: attach an abuffersink and insert, as
   * needed, aformat / apad / atrim filters before it. */
1752  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1753  AVFilterContext *last_filter = out->filter_ctx;
1754  int pad_idx = out->pad_idx;
1755  AVBPrint args;
1756  char name[255];
1757  int ret;
1758 
1759  snprintf(name, sizeof(name), "out_%s", ofilter->output_name);
1761  avfilter_get_by_name("abuffersink"),
1762  name, NULL, NULL, graph);
1763  if (ret < 0)
1764  return ret;
1765 
 /* Helper: create the named filter with arg, link it after last_filter and
  * make it the new chain tail; jumps to fail on error. */
1766 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1767  AVFilterContext *filt_ctx; \
1768  \
1769  av_log(ofilter, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1770  "similarly to -af " filter_name "=%s.\n", arg); \
1771  \
1772  ret = avfilter_graph_create_filter(&filt_ctx, \
1773  avfilter_get_by_name(filter_name), \
1774  filter_name, arg, NULL, graph); \
1775  if (ret < 0) \
1776  goto fail; \
1777  \
1778  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1779  if (ret < 0) \
1780  goto fail; \
1781  \
1782  last_filter = filt_ctx; \
1783  pad_idx = 0; \
1784 } while (0)
1786 
  /* constrain sample format / rate / channel layout via aformat */
1787  choose_sample_fmts(ofp, &args);
1788  choose_sample_rates(ofp, &args);
1789  choose_channel_layouts(ofp, &args);
1790  if (!av_bprint_is_complete(&args)) {
1791  ret = AVERROR(ENOMEM);
1792  goto fail;
1793  }
1794  if (args.len) {
1796 
1797  snprintf(name, sizeof(name), "format_out_%s", ofilter->output_name);
1799  avfilter_get_by_name("aformat"),
1800  name, args.str, NULL, graph);
1801  if (ret < 0)
1802  goto fail;
1803 
1804  ret = avfilter_link(last_filter, pad_idx, format, 0);
1805  if (ret < 0)
1806  goto fail;
1807 
1808  last_filter = format;
1809  pad_idx = 0;
1810  }
1811 
1812  if (ofilter->apad) {
1813  AUTO_INSERT_FILTER("-apad", "apad", ofilter->apad);
  /* apad generates data on its own, so the graph counts as having sources */
1814  fgp->have_sources = 1;
1815  }
1816 
1817  snprintf(name, sizeof(name), "trim for output %s", ofilter->output_name);
1818  ret = insert_trim(fgp, ofp->trim_start_us, ofp->trim_duration_us,
1819  &last_filter, &pad_idx, name);
1820  if (ret < 0)
1821  goto fail;
1822 
1823  if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
1824  goto fail;
  /* note: the success path falls through into fail, which only finalizes
   * the bprint — ret is 0 here */
1825 fail:
1826  av_bprint_finalize(&args, NULL);
1827 
1828  return ret;
1829 }
1830 
1832  OutputFilter *ofilter, AVFilterInOut *out)
1833 {
  /* Dispatch output configuration by media type; only video and audio
   * outputs are possible here. */
1834  switch (ofilter->type) {
1835  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fgp, graph, ofilter, out);
1836  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fgp, graph, ofilter, out);
1837  default: av_assert0(0); return 0;
1838  }
1839 }
1840 
1842 {
  /* Reset the sub2video state for a (re)configured subtitle input. */
1843  ifp->sub2video.last_pts = INT64_MIN;
1844  ifp->sub2video.end_pts = INT64_MIN;
1845 
1846  /* sub2video structure has been (re-)initialized.
1847  Mark it as such so that the system will be
1848  initialized with the first received heartbeat. */
1849  ifp->sub2video.initialize = 1;
1850 }
1851 
1853  InputFilter *ifilter, AVFilterInOut *in)
1854 {
  /* Feed a video input: create a buffer source configured from the input's
   * stored parameters, then insert crop / autorotate / trim filters before
   * linking to the graph's input pad. */
1855  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1856 
1857  AVFilterContext *last_filter;
1858  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1859  const AVPixFmtDescriptor *desc;
1860  char name[255];
1861  int ret, pad_idx = 0;
1863  if (!par)
1864  return AVERROR(ENOMEM);
1865 
1866  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1867  sub2video_prepare(ifp);
1868 
1869  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1870  ifp->opts.name);
1871 
1872  ifilter->filter = avfilter_graph_alloc_filter(graph, buffer_filt, name);
1873  if (!ifilter->filter) {
1874  ret = AVERROR(ENOMEM);
1875  goto fail;
1876  }
1877 
  /* propagate the input frame parameters into the buffersrc */
1878  par->format = ifp->format;
1879  par->time_base = ifp->time_base;
1880  par->frame_rate = ifp->opts.framerate;
1881  par->width = ifp->width;
1882  par->height = ifp->height;
1883  par->sample_aspect_ratio = ifp->sample_aspect_ratio.den > 0 ?
1884  ifp->sample_aspect_ratio : (AVRational){ 0, 1 };
1885  par->color_space = ifp->color_space;
1886  par->color_range = ifp->color_range;
1887  par->alpha_mode = ifp->alpha_mode;
1888  par->hw_frames_ctx = ifp->hw_frames_ctx;
1889  par->side_data = ifp->side_data;
1890  par->nb_side_data = ifp->nb_side_data;
1891 
1892  ret = av_buffersrc_parameters_set(ifilter->filter, par);
1893  if (ret < 0)
1894  goto fail;
1895  av_freep(&par);
1896 
1897  ret = avfilter_init_dict(ifilter->filter, NULL);
1898  if (ret < 0)
1899  goto fail;
1900 
1901  last_filter = ifilter->filter;
1902 
1904  av_assert0(desc);
1905 
1906  if ((ifp->opts.flags & IFILTER_FLAG_CROP)) {
1907  char crop_buf[64];
1908  snprintf(crop_buf, sizeof(crop_buf), "w=iw-%u-%u:h=ih-%u-%u:x=%u:y=%u",
1909  ifp->opts.crop_left, ifp->opts.crop_right,
1910  ifp->opts.crop_top, ifp->opts.crop_bottom,
1911  ifp->opts.crop_left, ifp->opts.crop_top);
1912  ret = insert_filter(&last_filter, &pad_idx, "crop", crop_buf);
1913  if (ret < 0)
1914  return ret;
1915  }
1916 
1917  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
  /* autorotation is only done in software; hw frames pass through as-is */
1918  ifp->displaymatrix_applied = 0;
1919  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1920  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1921  int32_t *displaymatrix = ifp->displaymatrix;
1922  double theta;
1923 
1924  theta = get_rotation(displaymatrix);
1925 
1926  if (fabs(theta - 90) < 1.0) {
1927  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1928  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1929  } else if (fabs(theta - 180) < 1.0) {
1930  if (displaymatrix[0] < 0) {
1931  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1932  if (ret < 0)
1933  return ret;
1934  }
1935  if (displaymatrix[4] < 0) {
1936  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1937  }
1938  } else if (fabs(theta - 270) < 1.0) {
1939  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1940  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1941  } else if (fabs(theta) > 1.0) {
1942  char rotate_buf[64];
1943  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1944  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1945  } else if (fabs(theta) < 1.0) {
1946  if (displaymatrix && displaymatrix[4] < 0) {
1947  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1948  }
1949  }
1950  if (ret < 0)
1951  return ret;
1952 
1953  ifp->displaymatrix_applied = 1;
1954  }
1955 
1956  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1957  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
1958  &last_filter, &pad_idx, name);
1959  if (ret < 0)
1960  return ret;
1961 
1962  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1963  return ret;
1964  return 0;
1965 fail:
1966  av_freep(&par);
1967 
1968  return ret;
1969 }
1970 
1972  InputFilter *ifilter, AVFilterInOut *in)
1973 {
  /* Feed an audio input: create an abuffer source from the stored input
   * parameters, attach side data, insert atrim if requested, then link to
   * the graph's input pad. */
1974  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1975  AVFilterContext *last_filter;
1976  AVBufferSrcParameters *par;
1977  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1978  AVBPrint args;
1979  char name[255];
1980  int ret, pad_idx = 0;
1981 
1983  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1984  ifp->time_base.num, ifp->time_base.den,
1985  ifp->sample_rate,
  /* prefer a full layout description; fall back to a bare channel count */
1987  if (av_channel_layout_check(&ifp->ch_layout) &&
1989  av_bprintf(&args, ":channel_layout=");
1991  } else
1992  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1993  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1994 
1995  if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
1996  name, args.str, NULL,
1997  graph)) < 0)
1998  return ret;
2000  if (!par)
2001  return AVERROR(ENOMEM);
  /* side data cannot be passed via the args string — use parameters */
2002  par->side_data = ifp->side_data;
2003  par->nb_side_data = ifp->nb_side_data;
2004  ret = av_buffersrc_parameters_set(ifilter->filter, par);
2005  av_free(par);
2006  if (ret < 0)
2007  return ret;
2008  last_filter = ifilter->filter;
2009 
2010  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
2011  ret = insert_trim(fg, ifp->opts.trim_start_us, ifp->opts.trim_end_us,
2012  &last_filter, &pad_idx, name);
2013  if (ret < 0)
2014  return ret;
2015 
2016  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
2017  return ret;
2018 
2019  return 0;
2020 }
2021 
2023  InputFilter *ifilter, AVFilterInOut *in)
2024 {
  /* Dispatch input configuration by media type; subtitle inputs are
   * handled as video via sub2video, so only video/audio appear here. */
2025  switch (ifilter->type) {
2026  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
2027  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
2028  default: av_assert0(0); return 0;
2029  }
2030 }
2031 
2033 {
  /* Free the configured graph and clear all now-dangling filter-context
   * pointers held by the inputs/outputs. */
2034  for (int i = 0; i < fg->nb_outputs; i++)
2035  fg->outputs[i]->filter = NULL;
2036  for (int i = 0; i < fg->nb_inputs; i++)
2037  fg->inputs[i]->filter = NULL;
2038  avfilter_graph_free(&fgt->graph);
2039 }
2040 
2042 {
  /* True if f is a (a)buffer source: no inputs and one of the two
   * buffer-source filter names. */
2043  return f->nb_inputs == 0 &&
2044  (!strcmp(f->filter->name, "buffer") ||
2045  !strcmp(f->filter->name, "abuffer"));
2046 }
2047 
 /* Return 1 if every filter in the graph is metadata-only (or a sink /
  * buffer source), i.e. the graph never touches frame data. */
2048 static int graph_is_meta(AVFilterGraph *graph)
2049 {
2050  for (unsigned i = 0; i < graph->nb_filters; i++) {
2051  const AVFilterContext *f = graph->filters[i];
2052 
2053  /* in addition to filters flagged as meta, also
2054  * disregard sinks and buffersources (but not other sources,
2055  * since they introduce data we are not aware of)
2056  */
2057  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
2058  f->nb_outputs == 0 ||
2060  return 0;
2061  }
2062  return 1;
2063 }
2064 
2065 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
2066 
2068 {
  /* (Re)configure the filtergraph: allocate a fresh AVFilterGraph, apply
   * per-graph options, parse the description, wire up all inputs/outputs,
   * then replay any queued frames and pending EOFs into the new graph. */
2069  FilterGraphPriv *fgp = fgp_from_fg(fg);
2070  AVBufferRef *hw_device;
2071  AVFilterInOut *inputs, *outputs, *cur;
2072  int ret = AVERROR_BUG, i, simple = filtergraph_is_simple(fg);
2073  int have_input_eof = 0;
2074  const char *graph_desc = fg->graph_desc;
2075 
2076  cleanup_filtergraph(fg, fgt);
2077  fgt->graph = avfilter_graph_alloc();
2078  if (!fgt->graph)
2079  return AVERROR(ENOMEM);
2080 
  /* simple graphs inherit thread/sws/swr options from their single output */
2081  if (simple) {
2082  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
2083 
2084  if (filter_nbthreads) {
2085  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
2086  if (ret < 0)
2087  goto fail;
2088  } else if (fgp->nb_threads >= 0) {
2089  ret = av_opt_set_int(fgt->graph, "threads", fgp->nb_threads, 0);
2090  if (ret < 0)
2091  return ret;
2092  }
2093 
2094  if (av_dict_count(ofp->sws_opts)) {
2096  &fgt->graph->scale_sws_opts,
2097  '=', ':');
2098  if (ret < 0)
2099  goto fail;
2100  }
2101 
2102  if (av_dict_count(ofp->swr_opts)) {
2103  char *args;
2104  ret = av_dict_get_string(ofp->swr_opts, &args, '=', ':');
2105  if (ret < 0)
2106  goto fail;
2107  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
2108  av_free(args);
2109  }
2110  } else {
2112  }
2113 
2114  if (filter_buffered_frames) {
2115  ret = av_opt_set_int(fgt->graph, "max_buffered_frames", filter_buffered_frames, 0);
2116  if (ret < 0)
2117  return ret;
2118  }
2119 
2120  hw_device = hw_device_for_filter();
2121 
2122  ret = graph_parse(fg, fgt->graph, graph_desc, &inputs, &outputs, hw_device);
2123  if (ret < 0)
2124  goto fail;
2125 
2126  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
2127  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
2130  goto fail;
2131  }
2133 
2134  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
2135  ret = configure_output_filter(fgp, fgt->graph, fg->outputs[i], cur);
2136  if (ret < 0) {
2138  goto fail;
2139  }
2140  }
2142 
2143  if (fgp->disable_conversions)
2145  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
2146  goto fail;
2147 
2148  fgp->is_meta = graph_is_meta(fgt->graph);
2149 
2150  /* limit the lists of allowed formats to the ones selected, to
2151  * make sure they stay the same if the filtergraph is reconfigured later */
2152  for (int i = 0; i < fg->nb_outputs; i++) {
2153  const AVFrameSideData *const *sd;
2154  int nb_sd;
2155  OutputFilter *ofilter = fg->outputs[i];
2156  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2157  AVFilterContext *sink = ofilter->filter;
2158 
2159  ofp->format = av_buffersink_get_format(sink);
2160 
2161  ofp->width = av_buffersink_get_w(sink);
2162  ofp->height = av_buffersink_get_h(sink);
2166 
2167  // If the timing parameters are not locked yet, get the tentative values
2168  // here but don't lock them. They will only be used if no output frames
2169  // are ever produced.
2170  if (!ofp->tb_out_locked) {
2172  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
2173  fr.num > 0 && fr.den > 0)
2174  ofp->fps.framerate = fr;
2175  ofp->tb_out = av_buffersink_get_time_base(sink);
2176  }
2178 
2181  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
2182  if (ret < 0)
2183  goto fail;
2184  sd = av_buffersink_get_side_data(sink, &nb_sd);
2185  if (nb_sd)
2186  for (int j = 0; j < nb_sd; j++) {
2189  if (ret < 0) {
2191  goto fail;
2192  }
2193  }
2194  }
2195 
  /* replay frames that were queued while the graph was unconfigured */
2196  for (int i = 0; i < fg->nb_inputs; i++) {
2197  InputFilter *ifilter = fg->inputs[i];
2199  AVFrame *tmp;
2200  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
2201  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2202  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
2203  } else {
2204  if (ifp->type_src == AVMEDIA_TYPE_VIDEO) {
2205  if (ifp->displaymatrix_applied)
2207  }
2208  ret = av_buffersrc_add_frame(ifilter->filter, tmp);
2209  }
2210  av_frame_free(&tmp);
2211  if (ret < 0)
2212  goto fail;
2213  }
2214  }
2215 
2216  /* send the EOFs for the finished inputs */
2217  for (int i = 0; i < fg->nb_inputs; i++) {
2218  InputFilter *ifilter = fg->inputs[i];
2219  if (fgt->eof_in[i]) {
2220  ret = av_buffersrc_add_frame(ifilter->filter, NULL);
2221  if (ret < 0)
2222  goto fail;
2223  have_input_eof = 1;
2224  }
2225  }
2226 
2227  if (have_input_eof) {
2228  // make sure the EOF propagates to the end of the graph
2230  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2231  goto fail;
2232  }
2233 
2234  return 0;
2235 fail:
2236  cleanup_filtergraph(fg, fgt);
2237  return ret;
2238 }
2239 
2241 {
  /* Capture the parameters of an incoming frame (format, dimensions,
   * timing, color properties, global side data) into the InputFilterPriv,
   * to be used when (re)configuring the buffersrc for this input. */
2242  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2243  AVFrameSideData *sd;
2244  int ret;
2245 
2246  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
2247  if (ret < 0)
2248  return ret;
2249 
  /* audio: 1/sample_rate; CFR video: inverse framerate; else frame tb */
2250  ifp->time_base = (ifilter->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
2251  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
2252  frame->time_base;
2253 
2254  ifp->format = frame->format;
2255 
2256  ifp->width = frame->width;
2257  ifp->height = frame->height;
2258  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
2259  ifp->color_space = frame->colorspace;
2260  ifp->color_range = frame->color_range;
2261  ifp->alpha_mode = frame->alpha_mode;
2262 
2263  ifp->sample_rate = frame->sample_rate;
2264  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
2265  if (ret < 0)
2266  return ret;
2267 
  /* keep only global side data; the display matrix is handled separately */
2269  for (int i = 0; i < frame->nb_side_data; i++) {
2270  const AVSideDataDescriptor *desc = av_frame_side_data_desc(frame->side_data[i]->type);
2271 
2272  if (!(desc->props & AV_SIDE_DATA_PROP_GLOBAL) ||
2273  frame->side_data[i]->type == AV_FRAME_DATA_DISPLAYMATRIX)
2274  continue;
2275 
2277  &ifp->nb_side_data,
2278  frame->side_data[i], 0);
2279  if (ret < 0)
2280  return ret;
2281  }
2282 
2284  if (sd)
2285  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
2286  ifp->displaymatrix_present = !!sd;
2287 
2288  /* Copy downmix related side data to InputFilterPriv so it may be propagated
2289  * to the filter chain even though it's not "global", as filters like aresample
2290  * require this information during init and not when remixing a frame */
2292  if (sd) {
2294  &ifp->nb_side_data, sd, 0);
2295  if (ret < 0)
2296  return ret;
2297  memcpy(&ifp->downmixinfo, sd->data, sizeof(ifp->downmixinfo));
2298  }
2299  ifp->downmixinfo_present = !!sd;
2300 
2301  return 0;
2302 }
2303 
2305 {
  /* Propagate parameters from a bound filtergraph output (ofilter) to the
   * consuming filtergraph input: inherit the framerate (marking the input
   * CFR when it is valid) and copy the output's side data. */
2306  const OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2307  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2308 
2309  if (!ifp->opts.framerate.num) {
2310  ifp->opts.framerate = ofp->fps.framerate;
2311  if (ifp->opts.framerate.num > 0 && ifp->opts.framerate.den > 0)
2312  ifp->opts.flags |= IFILTER_FLAG_CFR;
2313  }
2314 
2315  for (int i = 0; i < ofp->nb_side_data; i++) {
2318  if (ret < 0)
2319  return ret;
2320  }
2321 
2322  return 0;
2323 }
2324 
2326 {
  /* "simple" = created from a per-output -vf/-af chain, not -filter_complex */
2327  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
2328  return fgp->is_simple;
2329 }
2330 
2331 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
2332  double time, const char *target,
2333  const char *command, const char *arg, int all_filters)
2334 {
2335  int ret;
2336 
2337  if (!graph)
2338  return;
2339 
2340  if (time < 0) {
2341  char response[4096];
2342  ret = avfilter_graph_send_command(graph, target, command, arg,
2343  response, sizeof(response),
2344  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
2345  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
2346  fg->index, ret, response);
2347  } else if (!all_filters) {
2348  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
2349  } else {
2350  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
2351  if (ret < 0)
2352  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
2353  }
2354 }
2355 
2356 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
2357 {
2358  int nb_requests, nb_requests_max = -1;
2359  int best_input = -1;
2360 
2361  for (int i = 0; i < fg->nb_inputs; i++) {
2362  InputFilter *ifilter = fg->inputs[i];
2363 
2364  if (fgt->eof_in[i])
2365  continue;
2366 
2367  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
2368  if (nb_requests > nb_requests_max) {
2369  nb_requests_max = nb_requests;
2370  best_input = i;
2371  }
2372  }
2373 
2374  av_assert0(best_input >= 0);
2375 
2376  return best_input;
2377 }
2378 
2380 {
  /* Decide and lock the output timebase (and, for video, the framerate)
   * for this output, based on -enc_time_base, the buffersink properties
   * and the FPS conversion settings. */
2381  OutputFilter *ofilter = &ofp->ofilter;
2382  FPSConvContext *fps = &ofp->fps;
2383  AVRational tb = (AVRational){ 0, 0 };
2384  AVRational fr;
2385  const FrameData *fd;
2386 
2387  fd = frame_data_c(frame);
2388 
2389  // apply -enc_time_base
2390  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
2391  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
2392  av_log(ofp, AV_LOG_ERROR,
2393  "Demuxing timebase not available - cannot use it for encoding\n");
2394  return AVERROR(EINVAL);
2395  }
2396 
2397  switch (ofp->enc_timebase.num) {
2398  case 0: break;
2399  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
2400  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
2401  default: tb = ofp->enc_timebase; break;
2402  }
2403 
  /* audio: default to 1/sample_rate, no framerate handling needed */
2404  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
2405  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
2406  goto finish;
2407  }
2408 
2409  fr = fps->framerate;
2410  if (!fr.num) {
2411  AVRational fr_sink = av_buffersink_get_frame_rate(ofilter->filter);
2412  if (fr_sink.num > 0 && fr_sink.den > 0)
2413  fr = fr_sink;
2414  }
2415 
2416  if (fps->vsync_method == VSYNC_CFR || fps->vsync_method == VSYNC_VSCFR) {
2417  if (!fr.num && !fps->framerate_max.num) {
2418  fr = (AVRational){25, 1};
2419  av_log(ofp, AV_LOG_WARNING,
2420  "No information "
2421  "about the input framerate is available. Falling "
2422  "back to a default value of 25fps. Use the -r option "
2423  "if you want a different framerate.\n");
2424  }
2425 
2426  if (fps->framerate_max.num &&
2427  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
2428  !fr.den))
2429  fr = fps->framerate_max;
2430  }
2431 
  /* snap to the encoder-supported framerate list / clip the denominator */
2432  if (fr.num > 0) {
2433  if (fps->framerate_supported) {
2434  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2435  fr = fps->framerate_supported[idx];
2436  }
2437  if (fps->framerate_clip) {
2438  av_reduce(&fr.num, &fr.den,
2439  fr.num, fr.den, fps->framerate_clip);
2440  }
2441  }
2442 
  /* fall back to inverse framerate, then to the frame's own timebase */
2443  if (!(tb.num > 0 && tb.den > 0))
2444  tb = av_inv_q(fr);
2445  if (!(tb.num > 0 && tb.den > 0))
2446  tb = frame->time_base;
2447 
2448  fps->framerate = fr;
2449 finish:
2450  ofp->tb_out = tb;
2451  ofp->tb_out_locked = 1;
2452 
2453  return 0;
2454 }
2455 
/* Rescale frame->pts from the filtergraph timebase into the encoder timebase
 * tb_dst (shifted by start_time) and return the same timestamp as a double
 * with extra fractional precision, for use by the fps conversion code.
 * Returns AV_NOPTS_VALUE (as a double) when the frame has no pts. */
2456 static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame,
2457  AVRational tb_dst, int64_t start_time)
2458 {
2459  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2460 
2461  AVRational tb = tb_dst;
2462  AVRational filter_tb = frame->time_base;
 /* Widen the timebase denominator by up to 16 bits so the integer rescale
  * below preserves sub-tick precision before converting to double. */
2463  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2464 
2465  if (frame->pts == AV_NOPTS_VALUE)
2466  goto early_exit;
2467 
2468  tb.den <<= extra_bits;
 /* NOTE(review): the subtrahend on the next statement (source line 2470) was
  * lost in extraction — presumably the start_time rescaled into the widened
  * tb; confirm against the upstream file. */
2469  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2471  float_pts /= 1 << extra_bits;
2472  // when float_pts is not exactly an integer,
2473  // avoid exact midpoints to reduce the chance of rounding differences, this
2474  // can be removed in case the fps code is changed to work with integers
2475  if (float_pts != llrint(float_pts))
2476  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2477 
 /* NOTE(review): the subtrahend here (source line 2479) was also elided —
  * presumably start_time rescaled into tb_dst; verify upstream. */
2478  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2480  frame->time_base = tb_dst;
2481 
2482 early_exit:
2483 
2484  if (debug_ts) {
2485  av_log(logctx, AV_LOG_INFO,
2486  "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2487  frame ? av_ts2str(frame->pts) : "NULL",
2488  av_ts2timestr(frame->pts, &tb_dst),
2489  float_pts, tb_dst.num, tb_dst.den);
2490  }
2491 
2492  return float_pts;
2493 }
2494 
/* Return the median of the three values a, b, c.
 * NOTE(review): the signature (source line 2495, presumably
 * static int64_t median3(int64_t a, int64_t b, int64_t c)) was lost in
 * extraction; the int64_t locals below imply int64_t parameters. */
2496 {
2497  int64_t max2, min2, m;
2498 
 /* Order a and b so max2 >= min2. */
2499  if (a >= b) {
2500  max2 = a;
2501  min2 = b;
2502  } else {
2503  max2 = b;
2504  min2 = a;
2505  }
 /* Clamp c between min2 and max2; the clamped value is the median. */
2506  m = (c >= max2) ? max2 : c;
2507 
2508  return (m >= min2) ? m : min2;
2509 }
2510 
2511 
2512 /* Convert frame timestamps to the encoder timebase and decide how many times
2513  * should this (and possibly previous) frame be repeated in order to conform to
2514  * desired target framerate (if any).
2515  */
/* Outputs: *nb_frames = copies of the current frame to emit,
 * *nb_frames_prev = extra copies of the PREVIOUS frame to emit first.
 * A NULL frame means flush: repeat the previous frame based on recent history.
 * NOTE(review): the signature (source line 2516, presumably
 * static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, ...))
 * was lost in extraction. */
2517  int64_t *nb_frames, int64_t *nb_frames_prev)
2518 {
2519  OutputFilter *ofilter = &ofp->ofilter;
2520  FPSConvContext *fps = &ofp->fps;
2521  double delta0, delta, sync_ipts, duration;
2522 
 /* Flush path: estimate the repeat count from the median of the last three
  * frames' duplication history. */
2523  if (!frame) {
2524  *nb_frames_prev = *nb_frames = median3(fps->frames_prev_hist[0],
2525  fps->frames_prev_hist[1],
2526  fps->frames_prev_hist[2]);
2527 
2528  if (!*nb_frames && fps->last_dropped) {
2529  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2530  fps->last_dropped++;
2531  }
2532 
2533  goto finish;
2534  }
2535 
 /* Frame duration expressed in output-timebase ticks (as a double). */
2536  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
2537 
2538  sync_ipts = adjust_frame_pts_to_encoder_tb(ofilter->graph, frame,
2539  ofp->tb_out, ofp->ts_offset);
2540  /* delta0 is the "drift" between the input frame and
2541  * where it would fall in the output. */
2542  delta0 = sync_ipts - ofp->next_pts;
2543  delta = delta0 + duration;
2544 
2545  // tracks the number of times the PREVIOUS frame should be duplicated,
2546  // mostly for variable framerate (VFR)
2547  *nb_frames_prev = 0;
2548  /* by default, we output a single frame */
2549  *nb_frames = 1;
2550 
 /* Frame is slightly early but still overlaps the next output slot: clip it
  * to the slot instead of dropping/duplicating.
  * NOTE(review): the opening of the #if block (source lines 2553-2554,
  * presumably "#if FFMPEG_OPT_VSYNC_DROP") was lost in extraction — the
  * dangling "&&" and "#endif" below belong to it. */
2551  if (delta0 < 0 &&
2552  delta > 0 &&
2555  && fps->vsync_method != VSYNC_DROP
2556 #endif
2557  ) {
2558  if (delta0 < -0.6) {
2559  av_log(ofp, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2560  } else
2561  av_log(ofp, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2562  sync_ipts = ofp->next_pts;
2563  duration += delta0;
2564  delta0 = 0;
2565  }
2566 
2567  switch (fps->vsync_method) {
2568  case VSYNC_VSCFR:
 /* First frame in variable-start CFR: skip initial padding instead of
  * duplicating up to the start time, then fall through to CFR. */
2569  if (fps->frame_number == 0 && delta0 >= 0.5) {
2570  av_log(ofp, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2571  delta = duration;
2572  delta0 = 0;
2573  ofp->next_pts = llrint(sync_ipts);
2574  }
 /* fallthrough */
2575  case VSYNC_CFR:
2576  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2577  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2578  *nb_frames = 0;
2579  } else if (delta < -1.1)
2580  *nb_frames = 0;
2581  else if (delta > 1.1) {
2582  *nb_frames = llrintf(delta);
2583  if (delta0 > 1.1)
2584  *nb_frames_prev = llrintf(delta0 - 0.6);
2585  }
2586  frame->duration = 1;
2587  break;
2588  case VSYNC_VFR:
2589  if (delta <= -0.6)
2590  *nb_frames = 0;
2591  else if (delta > 0.6)
2592  ofp->next_pts = llrint(sync_ipts);
2593  frame->duration = llrint(duration);
2594  break;
2595 #if FFMPEG_OPT_VSYNC_DROP
2596  case VSYNC_DROP:
2597 #endif
2598  case VSYNC_PASSTHROUGH:
 /* No rate conversion: keep the input timing as-is. */
2599  ofp->next_pts = llrint(sync_ipts);
2600  frame->duration = llrint(duration);
2601  break;
2602  default:
2603  av_assert0(0);
2604  }
2605 
2606 finish:
 /* Shift the duplication history and record this frame's value. */
2607  memmove(fps->frames_prev_hist + 1,
2608  fps->frames_prev_hist,
2609  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2610  fps->frames_prev_hist[0] = *nb_frames_prev;
2611 
2612  if (*nb_frames_prev == 0 && fps->last_dropped) {
2613  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2614  av_log(ofp, AV_LOG_VERBOSE,
2615  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2616  fps->frame_number, fps->last_frame->pts);
2617  }
 /* Account for duplicated frames; bail out entirely when the requested
  * duplication is implausibly large (timestamp discontinuity). */
2618  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2619  uint64_t nb_frames_dup;
2620  if (*nb_frames > dts_error_threshold * 30) {
2621  av_log(ofp, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2622  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2623  *nb_frames = 0;
2624  return;
2625  }
2626  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2627  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2628  av_log(ofp, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2629  if (nb_frames_dup > fps->dup_warning) {
2630  av_log(ofp, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2631  fps->dup_warning *= 10;
2632  }
2633  }
2634 
2635  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2636  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2637 }
2638 
/* Mark a filtergraph input as having reached EOF (idempotent: does nothing
 * if ifp->eof is already set).
 * NOTE(review): source lines 2641 and 2644 were lost in extraction; 2644 sat
 * inside the if-body and presumably performed the actual EOF notification
 * before the flag is set — confirm against the upstream file. */
2639 static void close_input(InputFilterPriv *ifp)
2640 {
2642 
2643  if (!ifp->eof) {
2645  ifp->eof = 1;
2646  }
2647 }
2648 
/* Finalize one filtergraph output: if no frame was ever produced, first send
 * a parameters-only dummy frame so the downstream encoder can still be
 * initialized, then mark the output EOF and send the terminating NULL.
 * Returns 0 on success (AVERROR_EOF from the scheduler is treated as success),
 * a negative error code otherwise.
 * NOTE(review): the signature (source line 2649, presumably
 * close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)) and the fgp
 * declaration (line 2651) were lost in extraction. */
2650 {
2652  int ret;
2653 
2654  // we are finished and no frames were ever seen at this output,
2655  // at least initialize the encoder with a dummy frame
2656  if (!fgt->got_frame) {
2657  AVFrame *frame = fgt->frame;
2658  FrameData *fd;
2659 
 /* Populate the dummy frame with the output's negotiated parameters. */
2660  frame->time_base = ofp->tb_out;
2661  frame->format = ofp->format;
2662 
2663  frame->width = ofp->width;
2664  frame->height = ofp->height;
2665  frame->sample_aspect_ratio = ofp->sample_aspect_ratio;
2666 
2667  frame->sample_rate = ofp->sample_rate;
2668  if (ofp->ch_layout.nb_channels) {
2669  ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
2670  if (ret < 0)
2671  return ret;
2672  }
2673 
2674  fd = frame_data(frame);
2675  if (!fd)
2676  return AVERROR(ENOMEM);
2677 
 /* NOTE(review): source lines 2678-2679 were lost — presumably the
  * "ret = clone_side_data(...)" call these arguments belong to. */
2680  ofp->side_data, ofp->nb_side_data, 0);
2681  if (ret < 0)
2682  return ret;
2683 
2684  fd->frame_rate_filter = ofp->fps.framerate;
2685 
 /* The dummy frame must carry parameters only, never data. */
2686  av_assert0(!frame->buf[0]);
2687 
2688  av_log(ofp, AV_LOG_WARNING,
2689  "No filtered frames for output stream, trying to "
2690  "initialize anyway.\n");
2691 
2692  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame);
2693  if (ret < 0) {
 /* NOTE(review): source line 2694 (presumably av_frame_unref(frame))
  * was lost in extraction. */
2695  return ret;
2696  }
2697  }
2698 
2699  fgt->eof_out[ofp->ofilter.index] = 1;
2700 
 /* NULL frame signals EOF to the consumers of this output. */
2701  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, NULL);
2702  return (ret == AVERROR_EOF) ? 0 : ret;
2703 }
2704 
/* Send one filtered frame (or EOF when frame is NULL) to the consumers of a
 * single output, performing video framerate conversion: the frame — and
 * possibly the previously-sent frame — may be emitted zero or more times as
 * decided by video_sync_process(). Returns 0 on success or a negative error.
 * NOTE(review): the first signature line (source line 2705, presumably
 * fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt,) and the fgp
 * declaration (line 2708) were lost in extraction. */
2706  AVFrame *frame)
2707 {
2709  AVFrame *frame_prev = ofp->fps.last_frame;
2710  enum AVMediaType type = ofp->ofilter.type;
2711 
 /* Default: emit the frame exactly once (zero times on EOF). */
2712  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2713 
2714  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2715  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2716 
2717  for (int64_t i = 0; i < nb_frames; i++) {
2718  AVFrame *frame_out;
2719  int ret;
2720 
2721  if (type == AVMEDIA_TYPE_VIDEO) {
 /* The first nb_frames_prev iterations re-send the previous frame. */
2722  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2723  frame_prev : frame;
2724  if (!frame_in)
2725  break;
2726 
2727  frame_out = fgp->frame_enc;
2728  ret = av_frame_ref(frame_out, frame_in);
2729  if (ret < 0)
2730  return ret;
2731 
2732  frame_out->pts = ofp->next_pts;
2733 
 /* If a keyframe was dropped earlier, promote this one instead. */
2734  if (ofp->fps.dropped_keyframe) {
2735  frame_out->flags |= AV_FRAME_FLAG_KEY;
2736  ofp->fps.dropped_keyframe = 0;
2737  }
2738  } else {
 /* Audio: rescale pts into the output timebase and derive the
  * duration from the sample count.
  * NOTE(review): source line 2741 (the subtrahend of the rescale,
  * presumably the ts_offset term) was lost in extraction. */
2739  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2740  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2742 
2743  frame->time_base = ofp->tb_out;
2744  frame->duration = av_rescale_q(frame->nb_samples,
2745  (AVRational){ 1, frame->sample_rate },
2746  ofp->tb_out);
2747 
2748  ofp->next_pts = frame->pts + frame->duration;
2749 
2750  frame_out = frame;
2751  }
2752 
2753  // send the frame to consumers
2754  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->ofilter.index, frame_out);
2755  if (ret < 0) {
2756  av_frame_unref(frame_out);
2757 
 /* Consumer went away: record this output as done exactly once. */
2758  if (!fgt->eof_out[ofp->ofilter.index]) {
2759  fgt->eof_out[ofp->ofilter.index] = 1;
2760  fgp->nb_outputs_done++;
2761  }
2762 
2763  return ret == AVERROR_EOF ? 0 : ret;
2764  }
2765 
2766  if (type == AVMEDIA_TYPE_VIDEO) {
2767  ofp->fps.frame_number++;
2768  ofp->next_pts++;
2769 
 /* Only the first copy of the current frame keeps the key flag. */
2770  if (i == nb_frames_prev && frame)
2771  frame->flags &= ~AV_FRAME_FLAG_KEY;
2772  }
2773 
2774  fgt->got_frame = 1;
2775  }
2776 
 /* Remember this frame as "previous" for future duplication. */
2777  if (frame && frame_prev) {
2778  av_frame_unref(frame_prev);
2779  av_frame_move_ref(frame_prev, frame);
2780  }
2781 
2782  if (!frame)
2783  return close_output(ofp, fgt);
2784 
2785  return 0;
2786 }
2787 
/* Pull one frame from a buffersink and forward it via fg_output_frame().
 * Returns 0 when a frame was processed (caller should call again), 1 when no
 * more frames are currently available (EAGAIN/EOF), negative on error.
 * NOTE(review): several source lines were lost in extraction — the signature
 * (2788, presumably fg_output_step(OutputFilterPriv *ofp, ...)), the local
 * declarations (2791-2792), and the av_buffersink_get_frame_flags() call
 * (2796-2797) whose result the checks below inspect; confirm upstream. */
2789  AVFrame *frame)
2790 {
2793  FrameData *fd;
2794  int ret;
2795 
 /* First EOF from the sink triggers a flush of the fps converter. */
2798  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->ofilter.index]) {
2799  ret = fg_output_frame(ofp, fgt, NULL);
2800  return (ret < 0) ? ret : 1;
2801  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2802  return 1;
2803  } else if (ret < 0) {
2804  av_log(ofp, AV_LOG_WARNING,
2805  "Error in retrieving a frame from the filtergraph: %s\n",
2806  av_err2str(ret));
2807  return ret;
2808  }
2809 
 /* Output already finished: discard the frame (elided line 2811 presumably
  * unrefs it — NOTE(review): confirm upstream). */
2810  if (fgt->eof_out[ofp->ofilter.index]) {
2812  return 0;
2813  }
2814 
2816 
2817  if (debug_ts)
2818  av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2819  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
2820  frame->time_base.num, frame->time_base.den);
2821 
2822  // Choose the output timebase the first time we get a frame.
2823  if (!ofp->tb_out_locked) {
2824  ret = choose_out_timebase(ofp, frame);
2825  if (ret < 0) {
2826  av_log(ofp, AV_LOG_ERROR, "Could not choose an output time base\n");
2828  return ret;
2829  }
2830  }
2831 
2832  fd = frame_data(frame);
2833  if (!fd) {
2835  return AVERROR(ENOMEM);
2836  }
2837 
 /* First frame only: attach the output's global side data (the clone call
  * itself, source line 2840, was lost in extraction). */
2839  if (!fgt->got_frame) {
2841  ofp->side_data, ofp->nb_side_data, 0);
2842  if (ret < 0)
2843  return ret;
2844  }
2845 
2847 
2848  // only use bits_per_raw_sample passed through from the decoder
2849  // if the filtergraph did not touch the frame data
2850  if (!fgp->is_meta)
2851  fd->bits_per_raw_sample = 0;
2852 
2853  if (ofp->ofilter.type == AVMEDIA_TYPE_VIDEO) {
 /* Derive a duration from the framerate when the filter left it unset
  * (the framerate lookup on elided line 2855 presumably queries the
  * buffersink — NOTE(review): confirm upstream). */
2854  if (!frame->duration) {
2856  if (fr.num > 0 && fr.den > 0)
2857  frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
2858  }
2859 
2860  fd->frame_rate_filter = ofp->fps.framerate;
2861  }
2862 
2863  ret = fg_output_frame(ofp, fgt, frame);
2865  if (ret < 0)
2866  return ret;
2867 
2868  return 0;
2869 }
2870 
2871 /* retrieve all frames available at filtergraph outputs
2872  * and send them to consumers */
/* Also selects fgt->next_in — the input the scheduler should feed next.
 * Returns 0 normally, AVERROR_EOF once all outputs are done, negative errors
 * otherwise. NOTE(review): the first signature line (source line 2873,
 * presumably read_frames(FilterGraph *fg, FilterGraphThread *fgt,) was lost
 * in extraction, as were the ifp/ofp declarations at 2882/2901 and the
 * request-frame call at 2915. */
2874  AVFrame *frame)
2875 {
2876  FilterGraphPriv *fgp = fgp_from_fg(fg);
2877  int did_step = 0;
2878 
2879  // graph not configured, just select the input to request
2880  if (!fgt->graph) {
2881  for (int i = 0; i < fg->nb_inputs; i++) {
2883  if (ifp->format < 0 && !fgt->eof_in[i]) {
2884  fgt->next_in = i;
2885  return 0;
2886  }
2887  }
2888 
2889  // This state - graph is not configured, but all inputs are either
2890  // initialized or EOF - should be unreachable because sending EOF to a
2891  // filter without even a fallback format should fail
2892  av_assert0(0);
2893  return AVERROR_BUG;
2894  }
2895 
2896  while (fgp->nb_outputs_done < fg->nb_outputs) {
2897  int ret;
2898 
2899  /* Reap all buffers present in the buffer sinks */
2900  for (int i = 0; i < fg->nb_outputs; i++) {
2902 
 /* fg_output_step() returns 0 while frames keep coming. */
2903  ret = 0;
2904  while (!ret) {
2905  ret = fg_output_step(ofp, fgt, frame);
2906  if (ret < 0)
2907  return ret;
2908  }
2909  }
2910 
2911  // return after one iteration, so that scheduler can rate-control us
2912  if (did_step && fgp->have_sources)
2913  return 0;
2914 
 /* (elided line 2915 presumably requests a frame from the graph, e.g.
  * avfilter_graph_request_oldest() — NOTE(review): confirm upstream) */
2916  if (ret == AVERROR(EAGAIN)) {
 /* Graph is starved: pick the input with the most failed requests. */
2917  fgt->next_in = choose_input(fg, fgt);
2918  return 0;
2919  } else if (ret < 0) {
2920  if (ret == AVERROR_EOF)
2921  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2922  else
2923  av_log(fg, AV_LOG_ERROR,
2924  "Error requesting a frame from the filtergraph: %s\n",
2925  av_err2str(ret));
2926  return ret;
2927  }
 /* nb_inputs acts as the "no specific input requested" sentinel. */
2928  fgt->next_in = fg->nb_inputs;
2929 
2930  did_step = 1;
2931  }
2932 
2933  return AVERROR_EOF;
2934 }
2935 
/* Keep the sub2video overlay alive when no new subtitle arrives: either
 * refresh the displayed subpicture or re-push the current one at an updated
 * timestamp, so video filtering is not stalled waiting on subtitles.
 * NOTE(review): the signature (source line 2936, presumably
 * sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)) was
 * lost in extraction; parameter names inferred from the body. */
2937 {
2938  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2939  int64_t pts2;
2940 
2941  /* subtitles seem to be usually muxed ahead of other streams;
2942  if not, subtracting a larger time here is necessary */
2943  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2944 
2945  /* do not send the heartbeat frame if the subtitle is already ahead */
2946  if (pts2 <= ifp->sub2video.last_pts)
2947  return;
2948 
2949  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2950  /* if we have hit the end of the current displayed subpicture,
2951  or if we need to initialize the system, update the
2952  overlaid subpicture and its start/end times */
2953  sub2video_update(ifp, pts2 + 1, NULL);
2954  else
2955  sub2video_push_ref(ifp, pts2);
2956 }
2957 
/* Feed a subtitle-as-video (sub2video) input. Handles four cases:
 * buffering while the graph is not yet configured, heartbeat frames (frame
 * with no data buffer), end-of-stream (NULL frame), and a real subtitle
 * carried in frame->buf[0]. Returns 0 on success or a negative error code. */
2958 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2959 {
2960  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2961  int ret;
2962 
 /* Graph not configured yet: queue a copy of the frame for later. */
2963  if (buffer) {
2964  AVFrame *tmp;
2965 
2966  if (!frame)
2967  return 0;
2968 
2969  tmp = av_frame_alloc();
2970  if (!tmp)
2971  return AVERROR(ENOMEM);
2972 
 /* NOTE(review): source line 2973 was lost in extraction — presumably
  * it moves the frame's contents into tmp (av_frame_move_ref);
  * confirm against the upstream file. */
2974 
2975  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2976  if (ret < 0) {
2977  av_frame_free(&tmp);
2978  return ret;
2979  }
2980 
2981  return 0;
2982  }
2983 
2984  // heartbeat frame
2985  if (frame && !frame->buf[0]) {
2986  sub2video_heartbeat(ifilter, frame->pts, frame->time_base);
2987  return 0;
2988  }
2989 
 /* EOF: flush the current subpicture and propagate EOF to the buffersrc. */
2990  if (!frame) {
2991  if (ifp->sub2video.end_pts < INT64_MAX)
2992  sub2video_update(ifp, INT64_MAX, NULL);
2993 
2994  return av_buffersrc_add_frame(ifilter->filter, NULL);
2995  }
2996 
2997  ifp->width = frame->width ? frame->width : ifp->width;
2998  ifp->height = frame->height ? frame->height : ifp->height;
2999 
 /* Decoded subtitles travel inside the frame's data buffer. */
3000  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
3001 
3002  return 0;
3003 }
3004 
/* Signal EOF on one filtergraph input. If the graph is already configured,
 * the EOF (with its timestamp) is pushed into the buffersrc; otherwise the
 * input's fallback parameters are applied so the graph can still be
 * configured. Idempotent per input via fgt->eof_in[]. */
3005 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
3006  int64_t pts, AVRational tb)
3007 {
3008  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3009  int ret;
3010 
3011  if (fgt->eof_in[ifilter->index])
3012  return 0;
3013 
3014  fgt->eof_in[ifilter->index] = 1;
3015 
3016  if (ifilter->filter) {
 /* NOTE(review): source lines 3018 (rounding-flags argument) and 3020
  * (the av_buffersrc close/EOF call assigning ret) were lost in
  * extraction; confirm against the upstream file. */
3017  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
3019 
3021  if (ret < 0)
3022  return ret;
3023  } else {
3024  if (ifp->format < 0) {
3025  // the filtergraph was never configured, use the fallback parameters
3026  ifp->format = ifp->opts.fallback->format;
3027  ifp->sample_rate = ifp->opts.fallback->sample_rate;
3028  ifp->width = ifp->opts.fallback->width;
3029  ifp->height = ifp->opts.fallback->height;
 /* NOTE(review): source line 3030 (presumably the
  * sample_aspect_ratio fallback) was lost in extraction. */
3031  ifp->color_space = ifp->opts.fallback->colorspace;
3032  ifp->color_range = ifp->opts.fallback->color_range;
3033  ifp->alpha_mode = ifp->opts.fallback->alpha_mode;
3034  ifp->time_base = ifp->opts.fallback->time_base;
3035 
 /* NOTE(review): source line 3036 (presumably
  * ret = av_channel_layout_copy(&ifp->ch_layout,) was lost. */
3037  &ifp->opts.fallback->ch_layout);
3038  if (ret < 0)
3039  return ret;
3040 
3042  ret = clone_side_data(&ifp->side_data, &ifp->nb_side_data,
3043  ifp->opts.fallback->side_data,
3044  ifp->opts.fallback->nb_side_data, 0);
3045  if (ret < 0)
3046  return ret;
3047 
 /* With fallback parameters in place the graph may now be fully
  * specified — configure it so downstream sees the EOF. */
3048  if (ifilter_has_all_input_formats(ifilter->graph)) {
3049  ret = configure_filtergraph(ifilter->graph, fgt);
3050  if (ret < 0) {
3051  av_log(ifilter->graph, AV_LOG_ERROR, "Error initializing filters!\n");
3052  return ret;
3053  }
3054  }
3055  }
3056 
3057  if (ifp->format < 0) {
3058  av_log(ifilter->graph, AV_LOG_ERROR,
3059  "Cannot determine format of input %s after EOF\n",
3060  ifp->opts.name);
3061  return AVERROR_INVALIDDATA;
3062  }
3063  }
3064 
3065  return 0;
3066 }
3067 
/* Bitmask flags describing which input parameters changed relative to the
 * currently configured filtergraph; accumulated into need_reinit by
 * send_frame() to decide whether the graph must be reconfigured.
 * NOTE(review): the enum's opening line (source line 3068) was lost in
 * extraction. */
3069  VIDEO_CHANGED = (1 << 0),
3070  AUDIO_CHANGED = (1 << 1),
3071  MATRIX_CHANGED = (1 << 2),
3072  DOWNMIX_CHANGED = (1 << 3),
3073  HWACCEL_CHANGED = (1 << 4)
3074 };
3075 
/**
 * Map a possibly-NULL string to something printable.
 *
 * @param str string to check, may be NULL
 * @return str itself when non-NULL, the literal "unknown" otherwise
 */
static const char *unknown_if_null(const char *str)
{
    if (str)
        return str;
    return "unknown";
}
3080 
/* Push one input frame into the filtergraph. Detects parameter changes
 * against the configured input, reconfigures (or first-configures) the graph
 * when needed, rescales timestamps into the input's timebase and submits the
 * frame to the buffersrc. Returns 0 on success or a negative error code.
 * NOTE(review): the first signature line (source line 3081, presumably
 * send_frame(FilterGraph *fg, FilterGraphThread *fgt,) was lost in
 * extraction, along with several other lines flagged below. */
3082  InputFilter *ifilter, AVFrame *frame)
3083 {
3084  FilterGraphPriv *fgp = fgp_from_fg(fg);
3085  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
3086  FrameData *fd;
3087  AVFrameSideData *sd;
3088  int need_reinit = 0, ret;
3089 
3090  /* determine if the parameters for this input changed */
3091  switch (ifilter->type) {
3092  case AVMEDIA_TYPE_AUDIO:
3093  if (ifp->format != frame->format ||
3094  ifp->sample_rate != frame->sample_rate ||
3095  av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout))
3096  need_reinit |= AUDIO_CHANGED;
3097  break;
3098  case AVMEDIA_TYPE_VIDEO:
3099  if (ifp->format != frame->format ||
3100  ifp->width != frame->width ||
3101  ifp->height != frame->height ||
3102  ifp->color_space != frame->colorspace ||
3103  ifp->color_range != frame->color_range ||
3104  ifp->alpha_mode != frame->alpha_mode)
3105  need_reinit |= VIDEO_CHANGED;
3106  break;
3107  }
3108 
 /* Display-matrix side data: changed, appeared, or disappeared.
  * NOTE(review): the side-data lookup opening this if-block (source line
  * 3109) was lost in extraction. */
3110  if (!ifp->displaymatrix_present ||
3111  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
3112  need_reinit |= MATRIX_CHANGED;
3113  } else if (ifp->displaymatrix_present)
3114  need_reinit |= MATRIX_CHANGED;
3115 
 /* Downmix-info side data, same pattern (lookup at elided line 3116). */
3117  if (!ifp->downmixinfo_present ||
3118  memcmp(sd->data, &ifp->downmixinfo, sizeof(ifp->downmixinfo)))
3119  need_reinit |= DOWNMIX_CHANGED;
3120  } else if (ifp->downmixinfo_present)
3121  need_reinit |= DOWNMIX_CHANGED;
3122 
 /* -drop_changed: drop parameter-changing frames instead of reconfiguring
  * (elided line 3126 presumably unrefs the frame). */
3123  if (need_reinit && fgt->graph && (ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)) {
3124  ifp->nb_dropped++;
3125  av_log_once(fg, AV_LOG_WARNING, AV_LOG_DEBUG, &ifp->drop_warned, "Avoiding reinit; dropping frame pts: %s bound for %s\n", av_ts2str(frame->pts), ifilter->name);
3127  return 0;
3128  }
3129 
 /* Without -reinit_filter, parameter changes are ignored once configured. */
3130  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
3131  need_reinit = 0;
3132 
3133  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
3134  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
3135  need_reinit |= HWACCEL_CHANGED;
3136 
3137  if (need_reinit) {
 /* NOTE(review): elided line 3138 presumably re-derives the input's
  * parameters from this frame (e.g. ifilter_parameters_from_frame);
  * confirm upstream. */
3139  if (ret < 0)
3140  return ret;
3141 
3142  /* Inputs bound to a filtergraph output will have some fields unset.
3143  * Handle them here */
3144  if (ifp->ofilter_src) {
 /* (elided line 3145: the call whose result is checked below) */
3146  if (ret < 0)
3147  return ret;
3148  }
3149  }
3150 
3151  /* (re)init the graph if possible, otherwise buffer the frame and return */
3152  if (need_reinit || !fgt->graph) {
3153  AVFrame *tmp = av_frame_alloc();
3154 
3155  if (!tmp)
3156  return AVERROR(ENOMEM);
3157 
 /* Some inputs still lack parameters: queue the frame until the graph
  * can be configured (elided line 3159 presumably moves frame→tmp). */
3158  if (!ifilter_has_all_input_formats(fg)) {
3160 
3161  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
3162  if (ret < 0)
3163  av_frame_free(&tmp);
3164 
3165  return ret;
3166  }
3167 
 /* Drain the old graph before tearing it down for reconfiguration. */
3168  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
3169  av_frame_free(&tmp);
3170  if (ret < 0)
3171  return ret;
3172 
3173  if (fgt->graph) {
 /* Build a human-readable reason string for the reconfiguration log
  * message (bprint init at elided line 3175). */
3174  AVBPrint reason;
3176  if (need_reinit & AUDIO_CHANGED) {
3177  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
3178  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
3179  av_channel_layout_describe_bprint(&frame->ch_layout, &reason);
3180  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
3181  }
3182  if (need_reinit & VIDEO_CHANGED) {
3183  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
3184  const char *color_space_name = av_color_space_name(frame->colorspace);
3185  const char *color_range_name = av_color_range_name(frame->color_range);
3186  const char *alpha_mode = av_alpha_mode_name(frame->alpha_mode);
3187  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, %s alpha, ",
3188  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
3189  unknown_if_null(color_space_name), frame->width, frame->height,
3190  unknown_if_null(alpha_mode));
3191  }
3192  if (need_reinit & MATRIX_CHANGED)
3193  av_bprintf(&reason, "display matrix changed, ");
3194  if (need_reinit & DOWNMIX_CHANGED)
3195  av_bprintf(&reason, "downmix medatata changed, ");
3196  if (need_reinit & HWACCEL_CHANGED)
3197  av_bprintf(&reason, "hwaccel changed, ");
3198  if (reason.len > 1)
3199  reason.str[reason.len - 2] = '\0'; // remove last comma
3200  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
3201  } else {
3202  /* Choke all input to avoid buffering excessive frames while the
3203  * initial filter graph is being configured, and before we have a
3204  * preferred input */
3205  sch_filter_choke_inputs(fgp->sch, fgp->sch_idx);
3206  }
3207 
3208  ret = configure_filtergraph(fg, fgt);
3209  if (ret < 0) {
3210  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
3211  return ret;
3212  }
3213  }
3214 
 /* Rescale timestamps into the input's (buffersrc) timebase. */
3215  frame->pts = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
3216  frame->duration = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
3217  frame->time_base = ifp->time_base;
3218 
 /* NOTE(review): elided line 3220 presumably removes the display-matrix
  * side data after it has been applied; confirm upstream. */
3219  if (ifp->displaymatrix_applied)
3221 
3222  fd = frame_data(frame);
3223  if (!fd)
3224  return AVERROR(ENOMEM);
 /* (elided lines 3225, 3227-3228: fd update and the buffersrc submit call
  * whose result is checked below — NOTE(review): confirm upstream) */
3226 
3229  if (ret < 0) {
 /* (elided line 3230 presumably unrefs the frame on failure) */
3231  if (ret != AVERROR_EOF)
3232  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
3233  return ret;
3234  }
3235 
3236  return 0;
3237 }
3238 
/* Build a short thread name for this filtergraph: "?f<output_name>" for a
 * simple graph (the first character, from elided source line 3245, presumably
 * encodes the media type), or "fc<index>" for a complex one, then apply it
 * (the setter call at source line 3251 was also lost in extraction —
 * NOTE(review): confirm both against the upstream file). */
3239 static void fg_thread_set_name(const FilterGraph *fg)
3240 {
3241  char name[16];
3242  if (filtergraph_is_simple(fg)) {
3243  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[0]);
3244  snprintf(name, sizeof(name), "%cf%s",
3246  ofp->ofilter.output_name);
3247  } else {
3248  snprintf(name, sizeof(name), "fc%d", fg->index);
3249  }
3250 
3252 }
3253 
/* Free all per-thread filtergraph state: drain and free the queued output
 * frames, the scratch frame, the per-input/per-output EOF arrays and the
 * graph itself, then zero the struct for safe reuse.
 * NOTE(review): the signature (source line 3254, presumably
 * static void fg_thread_uninit(FilterGraphThread *fgt)) and the FIFO free
 * call (line 3260) were lost in extraction. */
3255 {
3256  if (fgt->frame_queue_out) {
3257  AVFrame *frame;
3258  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
3259  av_frame_free(&frame);
3261  }
3262 
3263  av_frame_free(&fgt->frame);
3264  av_freep(&fgt->eof_in);
3265  av_freep(&fgt->eof_out);
3266 
3267  avfilter_graph_free(&fgt->graph);
3268 
3269  memset(fgt, 0, sizeof(*fgt));
3270 }
3271 
/* Initialize per-thread filtergraph state: scratch frame, per-input and
 * per-output EOF flag arrays, and the output frame queue (the queue
 * allocation on elided source line 3288 — NOTE(review): confirm upstream).
 * On any allocation failure everything is torn down via fg_thread_uninit()
 * and AVERROR(ENOMEM) is returned; returns 0 on success. */
3272 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
3273 {
3274  memset(fgt, 0, sizeof(*fgt));
3275 
3276  fgt->frame = av_frame_alloc();
3277  if (!fgt->frame)
3278  goto fail;
3279 
3280  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
3281  if (!fgt->eof_in)
3282  goto fail;
3283 
3284  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
3285  if (!fgt->eof_out)
3286  goto fail;
3287 
3289  if (!fgt->frame_queue_out)
3290  goto fail;
3291 
3292  return 0;
3293 
3294 fail:
3295  fg_thread_uninit(fgt);
3296  return AVERROR(ENOMEM);
3297 }
3298 
/* Main loop of a filtergraph worker thread. Receives frames, EOFs and
 * control messages from the scheduler, feeds them into the graph, and pushes
 * filtered frames out to the consumers until all sides are finished.
 * arg is the FilterGraphPriv for this graph; returns 0 on normal (EOF)
 * termination, a negative error code otherwise. */
3299 static int filter_thread(void *arg)
3300 {
3301  FilterGraphPriv *fgp = arg;
3302  FilterGraph *fg = &fgp->fg;
3303 
3304  FilterGraphThread fgt;
3305  int ret = 0, input_status = 0;
3306 
3307  ret = fg_thread_init(&fgt, fg);
3308  if (ret < 0)
3309  goto finish;
3310 
3311  fg_thread_set_name(fg);
3312 
3313  // if we have all input parameters the graph can now be configured
 /* NOTE(review): the condition opening this block (source line 3314,
  * presumably "if (ifilter_has_all_input_formats(fg)) {") was lost in
  * extraction. */
3315  ret = configure_filtergraph(fg, &fgt);
3316  if (ret < 0) {
3317  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
3318  av_err2str(ret));
3319  goto finish;
3320  }
3321  }
3322 
3323  while (1) {
3324  InputFilter *ifilter;
3325  InputFilterPriv *ifp = NULL;
3326  enum FrameOpaque o;
3327  unsigned input_idx = fgt.next_in;
3328 
 /* Block until the scheduler hands us a frame, EOF, or a command. */
3329  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
3330  &input_idx, fgt.frame);
3331  if (input_status == AVERROR_EOF) {
3332  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
3333  break;
3334  } else if (input_status == AVERROR(EAGAIN)) {
3335  // should only happen when we didn't request any input
3336  av_assert0(input_idx == fg->nb_inputs);
3337  goto read_frames;
3338  }
3339  av_assert0(input_status >= 0);
3340 
3341  o = (intptr_t)fgt.frame->opaque;
3342 
3343  // message on the control stream
3344  if (input_idx == fg->nb_inputs) {
3345  FilterCommand *fc;
3346 
3347  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
3348 
3349  fc = (FilterCommand*)fgt.frame->buf[0]->data;
3350  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
3351  fc->all_filters);
3352  av_frame_unref(fgt.frame);
3353  continue;
3354  }
3355 
3356  // we received an input frame or EOF
3357  ifilter = fg->inputs[input_idx];
3358  ifp = ifp_from_ifilter(ifilter);
3359 
 /* Dispatch on input kind: subtitles (sub2video), data frames, or an
  * empty frame meaning EOF (elided line 3367 likely a comment). */
3360  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
3361  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
3362  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
3363  !fgt.graph);
3364  } else if (fgt.frame->buf[0]) {
3365  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
3366  } else {
3368  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
3369  }
3370  av_frame_unref(fgt.frame);
3371  if (ret == AVERROR_EOF) {
3372  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
3373  input_idx);
3374  close_input(ifp);
3375  continue;
3376  }
3377  if (ret < 0)
3378  goto finish;
3379 
3380 read_frames:
3381  // retrieve all newly available frames
3382  ret = read_frames(fg, &fgt, fgt.frame);
3383  if (ret == AVERROR_EOF) {
3384  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
3385  if (ifp && ifp->opts.flags & IFILTER_FLAG_DROPCHANGED)
3386  av_log(fg, AV_LOG_INFO, "Total changed input frames dropped : %"PRId64"\n", ifp->nb_dropped);
3387  break;
3388  } else if (ret < 0) {
3389  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
3390  av_err2str(ret));
3391  goto finish;
3392  }
3393 
3394  // ensure all inputs no longer accepting data are closed
 /* NOTE(review): source lines 3396-3397 (the per-input ifp lookup and
  * the condition guarding close_input) were lost in extraction. */
3395  for (int i = 0; fgt.graph && i < fg->nb_inputs; i++) {
3398  close_input(ifp);
3399  }
3400  }
3401 
 /* Flush any outputs that have not yet seen EOF (ofp lookup at elided
  * source line 3403). */
3402  for (unsigned i = 0; i < fg->nb_outputs; i++) {
3404 
3405  if (fgt.eof_out[i] || !fgt.graph)
3406  continue;
3407 
3408  ret = fg_output_frame(ofp, &fgt, NULL);
3409  if (ret < 0)
3410  goto finish;
3411  }
3412 
3413 finish:
3414 
 /* NOTE(review): the condition guarding the graph printout (source line
  * 3415) was lost in extraction. */
3416  print_filtergraph(fg, fgt.graph);
3417 
3418  // EOF is normal termination
3419  if (ret == AVERROR_EOF)
3420  ret = 0;
3421 
3422  fg_thread_uninit(&fgt);
3423 
3424  return ret;
3425 }
3426 
3427 void fg_send_command(FilterGraph *fg, double time, const char *target,
3428  const char *command, const char *arg, int all_filters)
3429 {
3430  FilterGraphPriv *fgp = fgp_from_fg(fg);
3431  AVBufferRef *buf;
3432  FilterCommand *fc;
3433 
3434  fc = av_mallocz(sizeof(*fc));
3435  if (!fc)
3436  return;
3437 
3438  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
3439  if (!buf) {
3440  av_freep(&fc);
3441  return;
3442  }
3443 
3444  fc->target = av_strdup(target);
3445  fc->command = av_strdup(command);
3446  fc->arg = av_strdup(arg);
3447  if (!fc->target || !fc->command || !fc->arg) {
3448  av_buffer_unref(&buf);
3449  return;
3450  }
3451 
3452  fc->time = time;
3453  fc->all_filters = all_filters;
3454 
3455  fgp->frame->buf[0] = buf;
3456  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
3457 
3458  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
3459 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:605
InputFilterPriv::nb_dropped
uint64_t nb_dropped
Definition: ffmpeg_filter.c:119
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:129
AVSubtitle
Definition: avcodec.h:2094
AVBufferSrcParameters::side_data
AVFrameSideData ** side_data
Definition: buffersrc.h:124
AVBufferSrcParameters::color_space
enum AVColorSpace color_space
Video only, the YUV colorspace and range.
Definition: buffersrc.h:121
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:2022
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:92
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:203
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:469
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:685
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:367
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:631
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:120
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:393
av_clip
#define av_clip
Definition: common.h:100
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2600
OutputFilter::class
const AVClass * class
Definition: ffmpeg.h:391
view_specifier_parse
int view_specifier_parse(const char **pspec, ViewSpecifier *vs)
Definition: ffmpeg_opt.c:309
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:70
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:109
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2099
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2356
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1553
FrameData::nb_side_data
int nb_side_data
Definition: ffmpeg.h:744
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:62
read_binary
static int read_binary(void *logctx, const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:443
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:66
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:94
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:615
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:131
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:987
FrameData
Definition: ffmpeg.h:722
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2331
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:152
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
InputFilterOptions::crop_right
unsigned crop_right
Definition: ffmpeg.h:291
OutputFilter::apad
char * apad
Definition: ffmpeg.h:406
out
static FILE * out
Definition: movenc.c:55
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:659
clone_side_data
static int clone_side_data(AVFrameSideData ***dst, int *nb_dst, AVFrameSideData *const *src, int nb_src, unsigned int flags)
Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
Definition: ffmpeg_utils.h:50
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:428
OutputFilterPriv::sample_fmts
enum AVSampleFormat * sample_fmts
Definition: ffmpeg_filter.c:228
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:137
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:931
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:418
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:155
AVBufferSrcParameters::nb_side_data
int nb_side_data
Definition: buffersrc.h:125
InputFilterOptions::crop_bottom
unsigned crop_bottom
Definition: ffmpeg.h:289
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:37
AVFrame::nb_side_data
int nb_side_data
Definition: frame.h:632
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2240
stream_specifier_parse
int stream_specifier_parse(StreamSpecifier *ss, const char *spec, int allow_remainder, void *logctx)
Parse a stream specifier string into a form suitable for matching.
Definition: cmdutils.c:1011
ofilter_class
static const AVClass ofilter_class
Definition: ffmpeg_filter.c:647
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:3073
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:62
close_input
static void close_input(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:2639
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:263
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, const ViewSpecifier *vs, InputFilterOptions *opts, SchedulerNode *src)
Definition: ffmpeg_demux.c:1048
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:133
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
configure_output_filter
static int configure_output_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1831
av_alpha_mode_name
const char * av_alpha_mode_name(enum AVAlphaMode mode)
Definition: pixdesc.c:3921
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:257
AVSubtitleRect
Definition: avcodec.h:2067
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2098
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts, const ViewSpecifier *vs, SchedulerNode *src)
Definition: ffmpeg_dec.c:1757
OutputFilterPriv::crop_left
unsigned crop_left
Definition: ffmpeg_filter.c:206
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:1016
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:172
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:572
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:696
InputFile::index
int index
Definition: ffmpeg.h:532
sample_rates
static const int sample_rates[]
Definition: dcaenc.h:34
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:434
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:757
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:536
AVFrame::width
int width
Definition: frame.h:506
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:47
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:54
StreamSpecifier
Definition: cmdutils.h:113
ofilter_bind_enc
int ofilter_bind_enc(OutputFilter *ofilter, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:813
AVOption
AVOption.
Definition: opt.h:429
InputFilterPriv::ofilter_src
OutputFilter * ofilter_src
Definition: ffmpeg_filter.c:110
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2705
b
#define b
Definition: input.c:42
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:191
FilterGraph::index
int index
Definition: ffmpeg.h:416
OutputFilter::index
int index
Definition: ffmpeg.h:395
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:130
data
const char data[16]
Definition: mxf.c:149
InputFilter::index
int index
Definition: ffmpeg.h:376
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:176
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:242
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2032
OutputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:202
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:419
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:3069
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
ViewSpecifier
Definition: ffmpeg.h:129
AVDictionary
Definition: dict.c:32
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:678
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:249
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:324
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: side_data.c:249
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:268
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:329
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1749
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:611
AVBufferSrcParameters::height
int height
Definition: buffersrc.h:87
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:326
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2788
FilterGraphPriv
Definition: ffmpeg_filter.c:43
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:599
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:97
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2067
OutputFilterPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:191
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
stream_specifier_uninit
void stream_specifier_uninit(StreamSpecifier *ss)
Definition: cmdutils.c:1002
InputStream
Definition: ffmpeg.h:483
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:76
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:70
OutputFilterOptions
Definition: ffmpeg.h:311
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:276
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:297
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
A convenience wrapper that allocates and initializes a filter in a single step.
Definition: avfiltergraph.c:140
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:167
finish
static void finish(void)
Definition: movenc.c:374
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
Underlying C type is a uint8_t* that is either NULL or points to an array allocated with the av_mallo...
Definition: opt.h:286
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:89
OutputFilterPriv
Definition: ffmpeg_filter.c:187
FrameData::dec
struct FrameData::@6 dec
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:3254
filter_opt_apply
static int filter_opt_apply(void *logctx, AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:492
fail
#define fail()
Definition: checkasm.h:224
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:379
AVBufferSrcParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only, the sample (pixel) aspect ratio.
Definition: buffersrc.h:92
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:328
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:85
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
print_filtergraph
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph)
Definition: graphprint.c:947
samplefmt.h
OutputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:209
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:274
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:617
AVFrame::alpha_mode
enum AVAlphaMode alpha_mode
Indicates how the alpha channel of the video is to be handled.
Definition: frame.h:789
val
static double val(void *priv, double ch)
Definition: aeval.c:77
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:777
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:123
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1852
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
AVDownmixInfo
This structure describes optional metadata relevant to a downmix procedure.
Definition: downmix_info.h:58
pts
static int64_t pts
Definition: transcode_aac.c:644
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:825
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:2048
median3
static int64_t median3(int64_t a, int64_t b, int64_t c)
Definition: ffmpeg_filter.c:2495
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:84
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:156
FrameData::tb
AVRational tb
Definition: ffmpeg.h:732
OutputFilterPriv::sws_opts
AVDictionary * sws_opts
Definition: ffmpeg_filter.c:221
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:70
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:198
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:177
AVRational::num
int num
Numerator.
Definition: rational.h:59
OutputFilter::bound
int bound
Definition: ffmpeg.h:403
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:103
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:277
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:40
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:859
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2936
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
OutputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:210
avassert.h
OutputFilterPriv::trim_start_us
int64_t trim_start_us
Definition: ffmpeg_filter.c:239
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:735
InputFilterPriv::nb_side_data
int nb_side_data
Definition: ffmpeg_filter.c:136
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:3005
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:101
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:649
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
Definition: buffersink.c:347
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:987
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:933
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:263
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:71
llrintf
#define llrintf(x)
Definition: libm.h:401
s
#define s(width, name)
Definition: cbs_vp9.c:198
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:681
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:64
DOWNMIX_CHANGED
@ DOWNMIX_CHANGED
Definition: ffmpeg_filter.c:3072
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:107
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:420
ofilter_item_name
static const char * ofilter_item_name(void *obj)
Definition: ffmpeg_filter.c:641
AVDictionaryEntry::key
char * key
Definition: dict.h:91
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:201
VIEW_SPECIFIER_TYPE_NONE
@ VIEW_SPECIFIER_TYPE_NONE
Definition: ffmpeg.h:118
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:119
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec, const ViewSpecifier *vs)
Definition: ffmpeg_filter.c:740
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:404
InputFilter
Definition: ffmpeg.h:373
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:57
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:494
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
OFILTER_FLAG_AUTOSCALE
@ OFILTER_FLAG_AUTOSCALE
Definition: ffmpeg.h:306
print_graphs_file
char * print_graphs_file
Definition: ffmpeg_opt.c:81
InputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:387
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2100
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:3272
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:279
InputFilterOptions::crop_top
unsigned crop_top
Definition: ffmpeg.h:288
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:374
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:303
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
color_range
color_range
Definition: vf_selectivecolor.c:43
av_mallocz
#define av_mallocz(s)
Definition: tableprint_vlc.h:31
InputFilterPriv::sub2video
struct InputFilterPriv::@10 sub2video
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:244
av_buffersink_get_alpha_mode
enum AVAlphaMode av_buffersink_get_alpha_mode(const AVFilterContext *ctx)
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:1074
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1187
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
arg
const char * arg
Definition: jacosubdec.c:65
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:230
if
if(ret)
Definition: filter_design.txt:179
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:197
InputFilterOptions::crop_left
unsigned crop_left
Definition: ffmpeg.h:290
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
AVBufferSrcParameters::alpha_mode
enum AVAlphaMode alpha_mode
Video only, the alpha mode.
Definition: buffersrc.h:130
AVFormatContext
Format I/O context.
Definition: avformat.h:1263
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:654
opts
static AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:767
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:394
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1434
OutputFilterPriv::crop_right
unsigned crop_right
Definition: ffmpeg_filter.c:207
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:238
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:882
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:126
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:885
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, const AVChannelLayout *layouts_allowed, const AVChannelLayout *layout_requested)
Definition: ffmpeg_filter.c:773
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:199
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:865
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:174
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:491
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:162
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:142
Decoder
Definition: ffmpeg.h:469
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
OFILTER_FLAG_AUDIO_24BIT
@ OFILTER_FLAG_AUDIO_24BIT
Definition: ffmpeg.h:305
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:934
av_frame_side_data_remove
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
Definition: side_data.c:103
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:591
ofilter_bind_ifilter
static int ofilter_bind_ifilter(OutputFilter *ofilter, InputFilterPriv *ifp, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:928
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:219
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg, enum AVMediaType type)
Definition: ffmpeg_filter.c:655
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2649
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:89
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:67
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:739
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1567
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:957
stream_specifier_match
unsigned stream_specifier_match(const StreamSpecifier *ss, const AVFormatContext *s, const AVStream *st, void *logctx)
Definition: cmdutils.c:1226
AVFilterGraph
Definition: avfilter.h:589
AV_FRAME_SIDE_DATA_FLAG_REPLACE
#define AV_FRAME_SIDE_DATA_FLAG_REPLACE
Don't add a new entry if another of the same type exists.
Definition: frame.h:1060
InputFilterPriv::downmixinfo_present
int downmixinfo_present
Definition: ffmpeg_filter.c:146
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:244
InputFilterOptions
Definition: ffmpeg.h:275
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:94
fg_create_simple
int fg_create_simple(FilterGraph **pfg, InputStream *ist, char **graph_desc, Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts)
Definition: ffmpeg_filter.c:1248
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:125
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:421
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:870
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:499
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:108
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:273
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:44
FilterGraphPriv::nb_threads
int nb_threads
Definition: ffmpeg_filter.c:59
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:188
FilterGraph
Definition: ffmpeg.h:414
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:946
OutputFilterPriv::crop_bottom
unsigned crop_bottom
Definition: ffmpeg_filter.c:205
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:78
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:293
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:381
AVBufferSrcParameters::frame_rate
AVRational frame_rate
Video only, the frame rate of the input video.
Definition: buffersrc.h:100
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:754
AVAlphaMode
AVAlphaMode
Correlation between the alpha channel and color values.
Definition: pixfmt.h:810
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:291
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:594
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2325
VideoSyncMethod
VideoSyncMethod
Definition: ffmpeg.h:66
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1985
FrameData::side_data
AVFrameSideData ** side_data
Definition: ffmpeg.h:743
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:269
f
f
Definition: af_crystalizer.c:122
OutputFilter::output_name
char * output_name
Definition: ffmpeg.h:399
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:3299
AVMediaType
AVMediaType
Definition: avutil.h:198
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:140
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:91
FilterGraphThread
Definition: ffmpeg_filter.c:81
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:144
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:319
OutputFilterPriv::color_ranges
enum AVColorRange * color_ranges
Definition: ffmpeg_filter.c:233
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:82
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:108
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:751
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:597
av_buffersrc_get_status
int av_buffersrc_get_status(AVFilterContext *ctx)
Returns 0 or a negative AVERROR code.
Definition: buffersrc.c:300
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:217
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
sch_filter_choke_inputs
void sch_filter_choke_inputs(Scheduler *sch, unsigned fg_idx)
Called by filtergraph tasks to choke all filter inputs, preventing them from receiving more frames un...
Definition: ffmpeg_sched.c:2663
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:328
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:188
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:127
OutputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:236
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:3071
FilterCommand::time
double time
Definition: ffmpeg_filter.c:259
InputFilterPriv::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:156
InputFilterPriv::displaymatrix_applied
int displaymatrix_applied
Definition: ffmpeg_filter.c:143
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1484
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:551
AVFrameSideData::data
uint8_t * data
Definition: frame.h:291
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:521
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:55
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:477
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2097
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:98
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1540
FPSConvContext::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg_filter.c:179
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:725
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:397
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:124
AVBufferSrcParameters::time_base
AVRational time_base
The timebase to be used for the timestamps on the input frames.
Definition: buffersrc.h:82
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:2041
fg_finalise_bindings
int fg_finalise_bindings(void)
Definition: ffmpeg_filter.c:1491
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:3070
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2529
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter, int commit)
Definition: ffmpeg_filter.c:1300
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:233
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:3076
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:294
decoders
Decoder ** decoders
Definition: ffmpeg.c:117
OutputFilterPriv::log_parent
void * log_parent
Definition: ffmpeg_filter.c:190
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:719
nb_decoders
int nb_decoders
Definition: ffmpeg.c:118
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:408
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2873
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:809
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2050
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:3081
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:959
buffersink.h
av_buffersink_get_side_data
const AVFrameSideData *const * av_buffersink_get_side_data(const AVFilterContext *ctx, int *nb_side_data)
Definition: buffersink.c:380
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:839
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:140
OutputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:201
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:260
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:184
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:168
filter_buffered_frames
int filter_buffered_frames
Definition: ffmpeg_opt.c:78
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:122
graph_opts_apply
static int graph_opts_apply(void *logctx, AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:548
FPSConvContext
Definition: ffmpeg_filter.c:165
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
av_malloc
#define av_malloc(s)
Definition: ops_asmgen.c:44
AVBufferSrcParameters::width
int width
Video only, the display dimensions of the input frames.
Definition: buffersrc.h:87
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:737
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: side_data.c:134
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:3427
downmix_info.h
sch_remove_filtergraph
void sch_remove_filtergraph(Scheduler *sch, int idx)
Definition: ffmpeg_sched.c:480
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:49
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:299
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:210
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:69
src2
const pixel * src2
Definition: h264pred_template.c:421
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1971
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:182
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
needed
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is needed
Definition: filter_design.txt:212
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
OutputFilterPriv::crop_top
unsigned crop_top
Definition: ffmpeg_filter.c:204
delta
float delta
Definition: vorbis_enc_data.h:430
print_graphs
int print_graphs
Definition: ffmpeg_opt.c:80
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:90
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:534
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:75
graph_parse
static int graph_parse(void *logctx, AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:572
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:116
ifilter_parameters_from_ofilter
static int ifilter_parameters_from_ofilter(InputFilter *ifilter, OutputFilter *ofilter)
Definition: ffmpeg_filter.c:2304
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
IFILTER_FLAG_DROPCHANGED
@ IFILTER_FLAG_DROPCHANGED
Definition: ffmpeg.h:272
AVFrame::side_data
AVFrameSideData ** side_data
Definition: frame.h:631
len
int len
Definition: vorbis_enc_data.h:426
SchedulerNode
Definition: ffmpeg_sched.h:103
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:114
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:312
OutputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:200
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
OFILTER_FLAG_CROP
@ OFILTER_FLAG_CROP
Definition: ffmpeg.h:308
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:957
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:60
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
OutputFilterPriv::needed
int needed
Definition: ffmpeg_filter.c:193
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2653
AVFilter
Filter definition.
Definition: avfilter.h:216
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2516
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:160
OFILTER_FLAG_AUTOROTATE
@ OFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:307
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:92
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:744
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:204
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
AVALPHA_MODE_UNSPECIFIED
@ AVALPHA_MODE_UNSPECIFIED
Unknown alpha handling, or no alpha channel.
Definition: pixfmt.h:811
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:375
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:73
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:783
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:166
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:548
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1593
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:243
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:122
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:3068
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:445
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:531
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:992
av_dynarray_add_nofree
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem)
Add an element to a dynamic array.
Definition: mem.c:315
AVBufferSrcParameters::color_range
enum AVColorRange color_range
Definition: buffersrc.h:122
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:88
OutputFilterPriv::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg_filter.c:222
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:506
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:750
IFILTER_FLAG_CROP
@ IFILTER_FLAG_CROP
Definition: ffmpeg.h:271
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:385
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:629
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:117
avfilter_init_dict
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
Initialize a filter with the supplied dictionary of options.
Definition: avfilter.c:918
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:487
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:932
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:138
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:114
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:442
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:52
insert_trim
static int insert_trim(void *logctx, int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1542
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:270
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:183
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:615
ifilter_bind_fg
static int ifilter_bind_fg(InputFilterPriv *ifp, FilterGraph *fg_src, int out_idx)
Definition: ffmpeg_filter.c:950
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2379
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
OutputFilterPriv::flags
unsigned flags
Definition: ffmpeg_filter.c:246
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:231
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:337
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:297
InputFilterPriv::side_data
AVFrameSideData ** side_data
Definition: ffmpeg_filter.c:135
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:958
OutputFilterPriv::alpha_modes
enum AVAlphaMode * alpha_modes
Definition: ffmpeg_filter.c:234
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:415
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:449
OutputFilter
Definition: ffmpeg.h:390
InputFilterPriv::drop_warned
int drop_warned
Definition: ffmpeg_filter.c:118
av_log_once
void av_log_once(void *avcl, int initial_level, int subsequent_level, int *state, const char *fmt,...)
Definition: log.c:451
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2958
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:102
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
desc
const char * desc
Definition: libsvtav1.c:82
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
configure_output_video_filter
static int configure_output_video_filter(FilterGraphPriv *fgp, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1619
ViewSpecifier::type
enum ViewSpecifierType type
Definition: ffmpeg.h:130
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:352
OutputFilterPriv::color_spaces
enum AVColorSpace * color_spaces
Definition: ffmpeg_filter.c:232
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:492
av_buffersink_get_colorspace
enum AVColorSpace av_buffersink_get_colorspace(const AVFilterContext *ctx)
av_strdup
#define av_strdup(s)
Definition: ops_asmgen.c:47
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(void *logctx, AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2456
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:411
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:83
llrint
#define llrint(x)
Definition: libm.h:396
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:289
bind_inputs
static int bind_inputs(FilterGraph *fg, int commit)
Definition: ffmpeg_filter.c:1473
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
InputStream::index
int index
Definition: ffmpeg.h:489
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2573
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:90
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:79
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:255
OutputFilterPriv::pix_fmts
enum AVPixelFormat * pix_fmts
Definition: ffmpeg_filter.c:227
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: side_data.c:63
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:1081
fg_create
int fg_create(FilterGraph **pfg, char **graph_desc, Scheduler *sch, const OutputFilterOptions *opts)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:1088
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:260
OFILTER_FLAG_DISABLE_CONVERT
@ OFILTER_FLAG_DISABLE_CONVERT
Definition: ffmpeg.h:303
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:247
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:472
AVFormatContext::name
char * name
Name of this format context, only used for logging purposes.
Definition: avformat.h:1888
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:122
InputFilterPriv::end_pts
int64_t end_pts
Definition: ffmpeg_filter.c:153
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:115
av_frame_side_data_get
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
Definition: frame.h:1158
int32_t
int32_t
Definition: audioconvert.c:56
InputFilterPriv::alpha_mode
enum AVAlphaMode alpha_mode
Definition: ffmpeg_filter.c:128
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:344
timestamp.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:617
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:196
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:85
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1454
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
graphprint.h
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:104
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:57
OutputFilterPriv::trim_duration_us
int64_t trim_duration_us
Definition: ffmpeg_filter.c:240
read_file_to_string
char * read_file_to_string(const char *filename)
Definition: cmdutils.c:1571
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
InputFilterPriv::downmixinfo
AVDownmixInfo downmixinfo
Definition: ffmpeg_filter.c:147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
h
h
Definition: vp9dsp_template.c:2070
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:130
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:298
AVDictionaryEntry::value
char * value
Definition: dict.h:92
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:592
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:277
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:742
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:483
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:214
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:746
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:68
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:197
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:126
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:181
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:3239
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:176
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1841
FilterGraph::is_internal
int is_internal
Definition: ffmpeg.h:426
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:42
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2096
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:256
src
#define src
Definition: vp8dsp.c:248
FilterCommand
Definition: ffmpeg_filter.c:254
duration
static int64_t duration
Definition: ffplay.c:329
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:124
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:410
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:77
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:286
av_buffersink_get_color_range
enum AVColorRange av_buffersink_get_color_range(const AVFilterContext *ctx)
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
InputFilter::input_name
char * input_name
Definition: ffmpeg.h:383
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:104
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:183