ffmpeg_filter.c
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/display.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/timestamp.h"
40 
41 // FIXME private header, used for mid_pred()
42 #include "libavcodec/mathops.h"
43 
44 typedef struct FilterGraphPriv {
45  FilterGraph fg;
46 
47  // name used for logging
48  char log_name[32];
49 
50  int is_simple;
51  // true when the filtergraph contains only meta filters
52  // that do not modify the frame data
53  int is_meta;
54  int disable_conversions;
55 
56  const char *graph_desc;
57 
58  // frame for temporarily holding output from the filtergraph
59  AVFrame *frame;
60  // frame for sending output to the encoder
61  AVFrame *frame_enc;
62 } FilterGraphPriv;
63 
64 static FilterGraphPriv *fgp_from_fg(FilterGraph *fg)
65 {
66  return (FilterGraphPriv*)fg;
67 }
68 
69 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
70 {
71  return (const FilterGraphPriv*)fg;
72 }
73 
74 typedef struct InputFilterPriv {
75  InputFilter ifilter;
76 
77  InputStream *ist;
78 
79  AVFilterContext *filter;
80 
81  // used to hold submitted input
82  AVFrame *frame;
83 
84  /* for filters that are not yet bound to an input stream,
85  * this stores the input linklabel, if any */
86  uint8_t *linklabel;
87 
88  // filter data type
89  enum AVMediaType type;
90  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
91  // same as type otherwise
92  enum AVMediaType type_src;
93 
94  int eof;
95 
96  // parameters configured for this input
97  int format;
98 
99  int width, height;
100  AVRational sample_aspect_ratio;
101 
102  int sample_rate;
103  AVChannelLayout ch_layout;
104 
105  AVRational time_base;
106 
107  AVFifo *frame_queue;
108 
109  AVBufferRef *hw_frames_ctx;
110 
111  int displaymatrix_present;
112  int32_t displaymatrix[9];
113 
114  // fallback parameters to use when no input is ever sent
115  struct {
116  int format;
117 
118  int width;
119  int height;
120  AVRational sample_aspect_ratio;
121 
122  int sample_rate;
123  AVChannelLayout ch_layout;
124  } fallback;
125 
126  struct {
127  AVFrame *frame;
128 
129  int64_t last_pts;
130  int64_t end_pts;
131 
132  ///< marks if sub2video_update should force an initialization
133  unsigned int initialize;
134  } sub2video;
135 } InputFilterPriv;
136 
137 static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter)
138 {
139  return (InputFilterPriv*)ifilter;
140 }
141 
142 typedef struct FPSConvContext {
143  AVFrame *last_frame;
144  /* number of frames emitted by the video-encoding sync code */
145  int64_t frame_number;
146  /* history of nb_frames_prev, i.e. the number of times the
147  * previous frame was duplicated by vsync code in recent
148  * do_video_out() calls */
149  int64_t frames_prev_hist[3];
150 
151  uint64_t dup_warning;
152 
153  int last_dropped;
154  int dropped_keyframe;
155 
156  AVRational framerate;
157  AVRational framerate_max;
158  const AVRational *framerate_supported;
159  int framerate_clip;
160 } FPSConvContext;
161 
162 typedef struct OutputFilterPriv {
163  OutputFilter ofilter;
164 
165  AVFilterContext *filter;
166 
167  /* desired output stream properties */
168  int format;
169  int width, height;
170  int sample_rate;
171  AVChannelLayout ch_layout;
172 
173  // time base in which the output is sent to our downstream
174  // does not need to match the filtersink's timebase
175  AVRational tb_out;
176  // at least one frame with the above timebase was sent
177  // to our downstream, so it cannot change anymore
178  int tb_out_locked;
179 
180  AVRational sample_aspect_ratio;
181 
182  // those are only set if no format is specified and the encoder gives us multiple options
183  // They point directly to the relevant lists of the encoder.
184  const int *formats;
185  const AVChannelLayout *ch_layouts;
186  const int *sample_rates;
187 
188  AVRational enc_timebase;
189  // offset for output timestamps, in AV_TIME_BASE_Q
190  int64_t ts_offset;
191  int64_t next_pts;
192  FPSConvContext fps;
193 
194  // set to 1 after at least one frame passed through this output
195  int got_frame;
196 } OutputFilterPriv;
197 
198 static OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter)
199 {
200  return (OutputFilterPriv*)ofilter;
201 }
202 
203 static int configure_filtergraph(FilterGraph *fg);
204 
205 static int sub2video_get_blank_frame(InputFilterPriv *ifp)
206 {
207  AVFrame *frame = ifp->sub2video.frame;
208  int ret;
209 
210  av_frame_unref(frame);
211 
212  frame->width = ifp->width;
213  frame->height = ifp->height;
214  frame->format = ifp->format;
215 
216  ret = av_frame_get_buffer(frame, 0);
217  if (ret < 0)
218  return ret;
219 
220  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
221 
222  return 0;
223 }
224 
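/* Copy one bitmap subtitle rectangle onto the sub2video canvas: each PAL8
 * source pixel is expanded through the rectangle's palette into a 32-bit
 * RGB pixel at the corresponding canvas position. */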
225 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
226  AVSubtitleRect *r)
227 {
228  uint32_t *pal, *dst2;
229  uint8_t *src, *src2;
230  int x, y;
231 
232  if (r->type != SUBTITLE_BITMAP) {
233  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
234  return;
235  }
236  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
237  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
238  r->x, r->y, r->w, r->h, w, h
239  );
240  return;
241  }
242 
243  dst += r->y * dst_linesize + r->x * 4;
244  src = r->data[0];
245  pal = (uint32_t *)r->data[1];
246  for (y = 0; y < r->h; y++) {
247  dst2 = (uint32_t *)dst;
248  src2 = src;
249  for (x = 0; x < r->w; x++)
250  *(dst2++) = pal[*(src2++)];
251  dst += dst_linesize;
252  src += r->linesize[0];
253  }
254 }
255 
256 static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
257 {
258  AVFrame *frame = ifp->sub2video.frame;
259  int ret;
260 
261  av_assert1(frame->data[0]);
262  ifp->sub2video.last_pts = frame->pts = pts;
263  ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
264  AV_BUFFERSRC_FLAG_KEEP_REF |
265  AV_BUFFERSRC_FLAG_PUSH);
266  if (ret != AVERROR_EOF && ret < 0)
267  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
268  av_err2str(ret));
269 }
270 
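/* Rebuild the sub2video canvas for a new subtitle (or for a heartbeat when
 * sub is NULL): compute the display start/end times in the input timebase,
 * blank the canvas, blit all subtitle rectangles and push the result into
 * the buffer source. */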
271 static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
272  const AVSubtitle *sub)
273 {
274  AVFrame *frame = ifp->sub2video.frame;
275  uint8_t *dst;
276  int dst_linesize;
277  int num_rects, i;
278  int64_t pts, end_pts;
279 
280  if (sub) {
281  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
282  AV_TIME_BASE_Q, ifp->time_base);
283  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
284  AV_TIME_BASE_Q, ifp->time_base);
285  num_rects = sub->num_rects;
286  } else {
287  /* If we are initializing the system, utilize current heartbeat
288  PTS as the start time, and show until the following subpicture
289  is received. Otherwise, utilize the previous subpicture's end time
290  as the fall-back value. */
291  pts = ifp->sub2video.initialize ?
292  heartbeat_pts : ifp->sub2video.end_pts;
293  end_pts = INT64_MAX;
294  num_rects = 0;
295  }
296  if (sub2video_get_blank_frame(ifp) < 0) {
298  "Impossible to get a blank canvas.\n");
299  return;
300  }
301  dst = frame->data [0];
302  dst_linesize = frame->linesize[0];
303  for (i = 0; i < num_rects; i++)
304  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
305  sub2video_push_ref(ifp, pts);
306  ifp->sub2video.end_pts = end_pts;
307  ifp->sub2video.initialize = 0;
308 }
309 
310 /* *dst may be set to NULL (no pixel format found), to a static string, or to
311  * a string backed by the bprint. Nothing has been written to the AVBPrint if
312  * NULL is returned. The AVBPrint provided should be clean. */
313 static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
314  const char **dst)
315 {
316  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
317  OutputStream *ost = ofilter->ost;
318 
319  *dst = NULL;
320 
321  if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
322  *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
323  av_get_pix_fmt_name(ofp->format);
324  } else if (ofp->formats) {
325  const enum AVPixelFormat *p = ofp->formats;
326 
327  for (; *p != AV_PIX_FMT_NONE; p++) {
328  const char *name = av_get_pix_fmt_name(*p);
329  av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
330  }
331  if (!av_bprint_is_complete(bprint))
332  return AVERROR(ENOMEM);
333 
334  *dst = bprint->str;
335  }
336 
337  return 0;
338 }
339 
340 /* Define a function for appending a list of allowed formats
341  * to an AVBPrint. If nonempty, the list will have a header. */
342 #define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
343 static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
344 { \
345  if (ofp->var == none && !ofp->supported_list) \
346  return; \
347  av_bprintf(bprint, #name "="); \
348  if (ofp->var != none) { \
349  av_bprintf(bprint, printf_format, get_name(ofp->var)); \
350  } else { \
351  const type *p; \
352  \
353  for (p = ofp->supported_list; *p != none; p++) { \
354  av_bprintf(bprint, printf_format "|", get_name(*p)); \
355  } \
356  if (bprint->len > 0) \
357  bprint->str[--bprint->len] = '\0'; \
358  } \
359  av_bprint_chars(bprint, ':', 1); \
360 }
361 
362 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
363 // GET_PIX_FMT_NAME)
364 
365 DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
366  AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)
367 
368 DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
369  "%d", )
370 
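/* Each DEF_CHOOSE_FORMAT() expansion appends one "key=value|value...:" group
 * to the aformat/format argument string. For example, with a fixed rate the
 * generated choose_sample_rates() would append "sample_rates=48000:", while
 * with only an encoder-supplied list it would append something like
 * "sample_rates=44100|48000:" (the values shown are illustrative). */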
371 static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
372 {
373  if (av_channel_layout_check(&ofp->ch_layout)) {
374  av_bprintf(bprint, "channel_layouts=");
375  av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
376  } else if (ofp->ch_layouts) {
377  const AVChannelLayout *p;
378 
379  av_bprintf(bprint, "channel_layouts=");
380  for (p = ofp->ch_layouts; p->nb_channels; p++) {
381  av_channel_layout_describe_bprint(p, bprint);
382  av_bprintf(bprint, "|");
383  }
384  if (bprint->len > 0)
385  bprint->str[--bprint->len] = '\0';
386  } else
387  return;
388  av_bprint_chars(bprint, ':', 1);
389 }
390 
391 static int read_binary(const char *path, uint8_t **data, int *len)
392 {
393  AVIOContext *io = NULL;
394  int64_t fsize;
395  int ret;
396 
397  *data = NULL;
398  *len = 0;
399 
400  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
401  if (ret < 0) {
402  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
403  path, av_err2str(ret));
404  return ret;
405  }
406 
407  fsize = avio_size(io);
408  if (fsize < 0 || fsize > INT_MAX) {
409  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
410  ret = AVERROR(EIO);
411  goto fail;
412  }
413 
414  *data = av_malloc(fsize);
415  if (!*data) {
416  ret = AVERROR(ENOMEM);
417  goto fail;
418  }
419 
420  ret = avio_read(io, *data, fsize);
421  if (ret != fsize) {
422  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
423  ret = ret < 0 ? ret : AVERROR(EIO);
424  goto fail;
425  }
426 
427  *len = fsize;
428 
429  ret = 0;
430 fail:
431  avio_close(io);
432  if (ret < 0) {
433  av_freep(data);
434  *len = 0;
435  }
436  return ret;
437 }
438 
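/* Apply a single key=value option to a filter instance. A key prefixed with
 * '/' means the value is a path to a file from which the real option value
 * is loaded; e.g. an option written as /text=greeting.txt (illustrative
 * example) would read the contents of greeting.txt into the 'text' option. */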
439 static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
440 {
441  const AVOption *o = NULL;
442  int ret;
443 
444  ret = av_opt_set(f, key, val, AV_OPT_SEARCH_CHILDREN);
445  if (ret >= 0)
446  return 0;
447 
448  if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
449  o = av_opt_find(f, key + 1, NULL, 0, AV_OPT_SEARCH_CHILDREN);
450  if (!o)
451  goto err_apply;
452 
453  // key is a valid option name prefixed with '/'
454  // interpret value as a path from which to load the actual option value
455  key++;
456 
457  if (o->type == AV_OPT_TYPE_BINARY) {
458  uint8_t *data;
459  int len;
460 
461  ret = read_binary(val, &data, &len);
462  if (ret < 0)
463  goto err_load;
464 
465  ret = av_opt_set_bin(f, key, data, len, AV_OPT_SEARCH_CHILDREN);
466  av_freep(&data);
467  } else {
468  char *data = file_read(val);
469  if (!data) {
470  ret = AVERROR(EIO);
471  goto err_load;
472  }
473 
474  ret = av_opt_set(f, key, data, AV_OPT_SEARCH_CHILDREN);
475  av_freep(&data);
476  }
477  if (ret < 0)
478  goto err_apply;
479 
480  return 0;
481 
482 err_apply:
484  "Error applying option '%s' to filter '%s': %s\n",
485  key, f->filter->name, av_err2str(ret));
486  return ret;
487 err_load:
489  "Error loading value for option '%s' from file '%s'\n",
490  key, val);
491  return ret;
492 }
493 
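/* Walk all filter instances in a parsed graph segment and apply their
 * remaining options through filter_opt_apply(), freeing each option
 * dictionary afterwards. */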
494 static int graph_opts_apply(AVFilterGraphSegment *seg)
495 {
496  for (size_t i = 0; i < seg->nb_chains; i++) {
497  AVFilterChain *ch = seg->chains[i];
498 
499  for (size_t j = 0; j < ch->nb_filters; j++) {
500  AVFilterParams *p = ch->filters[j];
501  const AVDictionaryEntry *e = NULL;
502 
503  av_assert0(p->filter);
504 
505  while ((e = av_dict_iterate(p->opts, e))) {
506  int ret = filter_opt_apply(p->filter, e->key, e->value);
507  if (ret < 0)
508  return ret;
509  }
510 
511  av_dict_free(&p->opts);
512  }
513  }
514 
515  return 0;
516 }
517 
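/* Parse a textual filtergraph description into an already allocated graph:
 * create the filters, attach the hardware device context where requested,
 * apply per-filter options and return the unlinked inputs/outputs. */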
518 static int graph_parse(AVFilterGraph *graph, const char *desc,
519  AVFilterInOut **inputs, AVFilterInOut **outputs,
520  AVBufferRef *hw_device)
521 {
522  AVFilterGraphSegment *seg;
523  int ret;
524 
525  *inputs = NULL;
526  *outputs = NULL;
527 
528  ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
529  if (ret < 0)
530  return ret;
531 
532  ret = avfilter_graph_segment_create_filters(seg, 0);
533  if (ret < 0)
534  goto fail;
535 
536  if (hw_device) {
537  for (int i = 0; i < graph->nb_filters; i++) {
538  AVFilterContext *f = graph->filters[i];
539 
540  if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
541  continue;
542  f->hw_device_ctx = av_buffer_ref(hw_device);
543  if (!f->hw_device_ctx) {
544  ret = AVERROR(ENOMEM);
545  goto fail;
546  }
547  }
548  }
549 
550  ret = graph_opts_apply(seg);
551  if (ret < 0)
552  goto fail;
553 
554  ret = avfilter_graph_segment_apply(seg, 0, inputs, outputs);
555 
556 fail:
557  avfilter_graph_segment_free(&seg);
558  return ret;
559 }
560 
561 // Filters can be configured only if the formats of all inputs are known.
562 static int ifilter_has_all_input_formats(FilterGraph *fg)
563 {
564  int i;
565  for (i = 0; i < fg->nb_inputs; i++) {
566  InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
567  if (ifp->format < 0)
568  return 0;
569  }
570  return 1;
571 }
572 
573 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
574 {
575  AVFilterContext *ctx = inout->filter_ctx;
576  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
577  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
578 
579  if (nb_pads > 1)
580  return av_strdup(ctx->filter->name);
581  return av_asprintf("%s:%s", ctx->filter->name,
582  avfilter_pad_get_name(pads, inout->pad_idx));
583 }
584 
585 static OutputFilter *ofilter_alloc(FilterGraph *fg)
586 {
587  OutputFilterPriv *ofp;
588  OutputFilter *ofilter;
589 
590  ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
591  if (!ofp)
592  return NULL;
593 
594  ofilter = &ofp->ofilter;
595  ofilter->graph = fg;
596  ofp->format = -1;
597  ofilter->last_pts = AV_NOPTS_VALUE;
598 
599  return ofilter;
600 }
601 
602 static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
603 {
604  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
605  int ret;
606 
607  av_assert0(!ifp->ist);
608 
609  ifp->ist = ist;
610  ifp->type_src = ist->st->codecpar->codec_type;
611 
612  ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph));
613  if (ret < 0)
614  return ret;
615 
616  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
617  ifp->sub2video.frame = av_frame_alloc();
618  if (!ifp->sub2video.frame)
619  return AVERROR(ENOMEM);
620  }
621 
622  return 0;
623 }
624 
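/* Pick the channel layout the filtergraph output should produce for this
 * encoder: pass explicit layouts through, otherwise choose a layout with
 * the requested channel count from the encoder's list, falling back to the
 * default native layout. */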
625 static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
626 {
627  const AVCodec *c = ost->enc_ctx->codec;
628  int i, err;
629 
630  if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
631  /* Pass the layout through for all orders but UNSPEC */
632  err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
633  if (err < 0)
634  return err;
635  return 0;
636  }
637 
638  /* Requested layout is of order UNSPEC */
639  if (!c->ch_layouts) {
640  /* Use the default native layout for the requested amount of channels when the
641  encoder doesn't have a list of supported layouts */
642  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
643  return 0;
644  }
645  /* Encoder has a list of supported layouts. Pick the first layout in it with the
646  same amount of channels as the requested layout */
647  for (i = 0; c->ch_layouts[i].nb_channels; i++) {
648  if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
649  break;
650  }
651  if (c->ch_layouts[i].nb_channels) {
652  /* Use it if one is found */
653  err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
654  if (err < 0)
655  return err;
656  return 0;
657  }
658  /* If no layout for the amount of channels requested was found, use the default
659  native layout for it. */
660  av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
661 
662  return 0;
663 }
664 
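/* Bind a filtergraph output to an output stream: record the desired frame
 * properties (formats, rates, layouts, framerate limits) from the encoder
 * context, and configure the whole graph once every input and output is
 * bound. */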
665 int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost)
666 {
667  const OutputFile *of = output_files[ost->file_index];
668  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
669  FilterGraph *fg = ofilter->graph;
670  FilterGraphPriv *fgp = fgp_from_fg(fg);
671  const AVCodec *c = ost->enc_ctx->codec;
672 
673  av_assert0(!ofilter->ost);
674 
675  ofilter->ost = ost;
676  av_freep(&ofilter->linklabel);
677 
678  ofp->ts_offset = of->start_time == AV_NOPTS_VALUE ? 0 : of->start_time;
679  ofp->enc_timebase = ost->enc_timebase;
680 
681  switch (ost->enc_ctx->codec_type) {
682  case AVMEDIA_TYPE_VIDEO:
683  ofp->width = ost->enc_ctx->width;
684  ofp->height = ost->enc_ctx->height;
685  if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
686  ofp->format = ost->enc_ctx->pix_fmt;
687  } else {
688  ofp->formats = c->pix_fmts;
689 
690  // MJPEG encoder exports a full list of supported pixel formats,
691  // but the full-range ones are experimental-only.
692  // Restrict the auto-conversion list unless -strict experimental
693  // has been specified.
694  if (!strcmp(c->name, "mjpeg")) {
695  // FIXME: YUV420P etc. are actually supported with full color range,
696  // yet the latter information isn't available here.
697  static const enum AVPixelFormat mjpeg_formats[] =
698  { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
699  AV_PIX_FMT_NONE };
700 
701  const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
702  int strict_val = ost->enc_ctx->strict_std_compliance;
703 
704  if (strict) {
705  const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
706  av_assert0(o);
707  av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
708  }
709 
710  if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
711  ofp->formats = mjpeg_formats;
712  }
713  }
714 
715  fgp->disable_conversions |= ost->keep_pix_fmt;
716 
717  ofp->fps.last_frame = av_frame_alloc();
718  if (!ofp->fps.last_frame)
719  return AVERROR(ENOMEM);
720 
721  ofp->fps.framerate = ost->frame_rate;
722  ofp->fps.framerate_max = ost->max_frame_rate;
723  ofp->fps.framerate_supported = ost->force_fps ?
724  NULL : c->supported_framerates;
725 
726  // reduce frame rate for mpeg4 to be within the spec limits
727  if (c->id == AV_CODEC_ID_MPEG4)
728  ofp->fps.framerate_clip = 65535;
729 
730  ofp->fps.dup_warning = 1000;
731 
732  break;
733  case AVMEDIA_TYPE_AUDIO:
734  if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
735  ofp->format = ost->enc_ctx->sample_fmt;
736  } else {
737  ofp->formats = c->sample_fmts;
738  }
739  if (ost->enc_ctx->sample_rate) {
740  ofp->sample_rate = ost->enc_ctx->sample_rate;
741  } else {
742  ofp->sample_rates = c->supported_samplerates;
743  }
744  if (ost->enc_ctx->ch_layout.nb_channels) {
745  int ret = set_channel_layout(ofp, ost);
746  if (ret < 0)
747  return ret;
748  } else if (c->ch_layouts) {
749  ofp->ch_layouts = c->ch_layouts;
750  }
751  break;
752  }
753 
754  // if we have all input parameters and all outputs are bound,
755  // the graph can now be configured
756  if (ifilter_has_all_input_formats(fg)) {
757  int ret;
758 
759  for (int i = 0; i < fg->nb_outputs; i++)
760  if (!fg->outputs[i]->ost)
761  return 0;
762 
763  ret = configure_filtergraph(fg);
764  if (ret < 0) {
765  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
766  av_err2str(ret));
767  return ret;
768  }
769  }
770 
771  return 0;
772 }
773 
774 static InputFilter *ifilter_alloc(FilterGraph *fg)
775 {
776  InputFilterPriv *ifp;
777  InputFilter *ifilter;
778 
779  ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
780  if (!ifp)
781  return NULL;
782 
783  ifilter = &ifp->ifilter;
784  ifilter->graph = fg;
785 
786  ifp->frame = av_frame_alloc();
787  if (!ifp->frame)
788  return NULL;
789 
790  ifp->format = -1;
791  ifp->fallback.format = -1;
792 
793  ifp->frame_queue = av_fifo_alloc2(8, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
794  if (!ifp->frame_queue)
795  return NULL;
796 
797  return ifilter;
798 }
799 
800 void fg_free(FilterGraph **pfg)
801 {
802  FilterGraph *fg = *pfg;
803  FilterGraphPriv *fgp;
804 
805  if (!fg)
806  return;
807  fgp = fgp_from_fg(fg);
808 
809  avfilter_graph_free(&fg->graph);
810  for (int j = 0; j < fg->nb_inputs; j++) {
811  InputFilter *ifilter = fg->inputs[j];
812  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
813 
814  if (ifp->frame_queue) {
815  AVFrame *frame;
816  while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
817  av_frame_free(&frame);
818  av_fifo_freep2(&ifp->frame_queue);
819  }
820  av_frame_free(&ifp->sub2video.frame);
821 
822  av_channel_layout_uninit(&ifp->fallback.ch_layout);
823 
824  av_frame_free(&ifp->frame);
825 
827  av_freep(&ifp->linklabel);
828  av_freep(&ifilter->name);
829  av_freep(&fg->inputs[j]);
830  }
831  av_freep(&fg->inputs);
832  for (int j = 0; j < fg->nb_outputs; j++) {
833  OutputFilter *ofilter = fg->outputs[j];
834  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
835 
836  av_frame_free(&ofp->fps.last_frame);
837 
838  av_freep(&ofilter->linklabel);
839  av_freep(&ofilter->name);
840  av_channel_layout_uninit(&ofp->ch_layout);
841  av_freep(&fg->outputs[j]);
842  }
843  av_freep(&fg->outputs);
844  av_freep(&fgp->graph_desc);
845 
846  av_frame_free(&fgp->frame);
847  av_frame_free(&fgp->frame_enc);
848 
849  av_freep(pfg);
850 }
851 
852 static const char *fg_item_name(void *obj)
853 {
854  const FilterGraphPriv *fgp = obj;
855 
856  return fgp->log_name;
857 }
858 
859 static const AVClass fg_class = {
860  .class_name = "FilterGraph",
861  .version = LIBAVUTIL_VERSION_INT,
862  .item_name = fg_item_name,
863  .category = AV_CLASS_CATEGORY_FILTER,
864 };
865 
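/* Allocate a new FilterGraph for the given textual description and register
 * it in the global filtergraphs array. The description is parsed once here
 * only to discover the graph's inputs and outputs; the real graph is built
 * later by configure_filtergraph(). */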
866 int fg_create(FilterGraph **pfg, char *graph_desc)
867 {
868  FilterGraphPriv *fgp;
869  FilterGraph *fg;
870 
871  AVFilterInOut *inputs, *outputs;
872  AVFilterGraph *graph;
873  int ret = 0;
874 
875  fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
876  if (!fgp)
877  return AVERROR(ENOMEM);
878  fg = &fgp->fg;
879 
880  if (pfg)
881  *pfg = fg;
882 
883  fg->class = &fg_class;
884  fg->index = nb_filtergraphs - 1;
885  fgp->graph_desc = graph_desc;
886  fgp->disable_conversions = !auto_conversion_filters;
887 
888  snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);
889 
890  fgp->frame = av_frame_alloc();
891  fgp->frame_enc = av_frame_alloc();
892  if (!fgp->frame || !fgp->frame_enc)
893  return AVERROR(ENOMEM);
894 
895  /* this graph is only used for determining the kinds of inputs
896  * and outputs we have, and is discarded on exit from this function */
897  graph = avfilter_graph_alloc();
898  if (!graph)
899  return AVERROR(ENOMEM);
900  graph->nb_threads = 1;
901 
902  ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
903  if (ret < 0)
904  goto fail;
905 
906  for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
907  InputFilter *const ifilter = ifilter_alloc(fg);
908  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
909 
910  ifp->linklabel = cur->name;
911  cur->name = NULL;
912 
913  ifp->type = avfilter_pad_get_type(cur->filter_ctx->input_pads,
914  cur->pad_idx);
915  ifilter->name = describe_filter_link(fg, cur, 1);
916  if (!ifilter->name) {
917  ret = AVERROR(ENOMEM);
918  goto fail;
919  }
920  }
921 
922  for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
923  OutputFilter *const ofilter = ofilter_alloc(fg);
924 
925  if (!ofilter)
926  goto fail;
927 
928  ofilter->linklabel = cur->name;
929  cur->name = NULL;
930 
931  ofilter->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
932  cur->pad_idx);
933  ofilter->name = describe_filter_link(fg, cur, 0);
934  if (!ofilter->name) {
935  ret = AVERROR(ENOMEM);
936  goto fail;
937  }
938  }
939 
940  if (!fg->nb_outputs) {
941  av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
942  ret = AVERROR(ENOSYS);
943  goto fail;
944  }
945 
946 fail:
949  avfilter_graph_free(&graph);
950 
951  if (ret < 0)
952  return ret;
953 
954  return 0;
955 }
956 
957 int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
958  char *graph_desc)
959 {
960  FilterGraph *fg;
961  FilterGraphPriv *fgp;
962  int ret;
963 
964  ret = fg_create(&fg, graph_desc);
965  if (ret < 0)
966  return ret;
967  fgp = fgp_from_fg(fg);
968 
969  fgp->is_simple = 1;
970 
971  snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
972  av_get_media_type_string(ost->type)[0],
973  ost->file_index, ost->index);
974 
975  if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
976  av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
977  "to have exactly 1 input and 1 output. "
978  "However, it had %d input(s) and %d output(s). Please adjust, "
979  "or use a complex filtergraph (-filter_complex) instead.\n",
980  graph_desc, fg->nb_inputs, fg->nb_outputs);
981  return AVERROR(EINVAL);
982  }
983 
984  ost->filter = fg->outputs[0];
985 
986  ret = ifilter_bind_ist(fg->inputs[0], ist);
987  if (ret < 0)
988  return ret;
989 
990  ret = ofilter_bind_ost(fg->outputs[0], ost);
991  if (ret < 0)
992  return ret;
993 
994  return 0;
995 }
996 
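/* Bind one complex-filtergraph input to an input stream: either resolve its
 * [file:stream] link label against the opened input files, or pick the first
 * unused stream of the matching media type. */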
997 static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
998 {
999  FilterGraphPriv *fgp = fgp_from_fg(fg);
1000  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1001  InputStream *ist = NULL;
1002  enum AVMediaType type = ifp->type;
1003  int i, ret;
1004 
1005  // TODO: support other filter types
1006  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
1007  av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
1008  "currently.\n");
1009  return AVERROR(ENOSYS);
1010  }
1011 
1012  if (ifp->linklabel) {
1013  AVFormatContext *s;
1014  AVStream *st = NULL;
1015  char *p;
1016  int file_idx = strtol(ifp->linklabel, &p, 0);
1017 
1018  if (file_idx < 0 || file_idx >= nb_input_files) {
1019  av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
1020  file_idx, fgp->graph_desc);
1021  return AVERROR(EINVAL);
1022  }
1023  s = input_files[file_idx]->ctx;
1024 
1025  for (i = 0; i < s->nb_streams; i++) {
1026  enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
1027  if (stream_type != type &&
1028  !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
1029  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
1030  continue;
1031  if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
1032  st = s->streams[i];
1033  break;
1034  }
1035  }
1036  if (!st) {
1037  av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
1038  "matches no streams.\n", p, fgp->graph_desc);
1039  return AVERROR(EINVAL);
1040  }
1041  ist = input_files[file_idx]->streams[st->index];
1042  } else {
1043  ist = ist_find_unused(type);
1044  if (!ist) {
1045  av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
1046  "unlabeled input pad %s\n", ifilter->name);
1047  return AVERROR(EINVAL);
1048  }
1049  }
1050  av_assert0(ist);
1051 
1052  ret = ifilter_bind_ist(ifilter, ist);
1053  if (ret < 0) {
1054  av_log(fg, AV_LOG_ERROR,
1055  "Error binding an input stream to complex filtergraph input %s.\n",
1056  ifilter->name);
1057  return ret;
1058  }
1059 
1060  return 0;
1061 }
1062 
1063 int fg_finalise_bindings(FilterGraph *fg)
1064 {
1065  // bind filtergraph inputs to input streams
1066  for (int i = 0; i < fg->nb_inputs; i++) {
1067  int ret = init_input_filter(fg, fg->inputs[i]);
1068  if (ret < 0)
1069  return ret;
1070  }
1071  return 0;
1072 }
1073 
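/* Append a (a)trim filter after *last_filter so that output duration and
 * start time limits (-t / -ss on the output) are enforced inside the
 * filtergraph; updates *last_filter / *pad_idx to the new chain end. */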
1074 static int insert_trim(int64_t start_time, int64_t duration,
1075  AVFilterContext **last_filter, int *pad_idx,
1076  const char *filter_name)
1077 {
1078  AVFilterGraph *graph = (*last_filter)->graph;
1079  AVFilterContext *ctx;
1080  const AVFilter *trim;
1081  enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
1082  const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
1083  int ret = 0;
1084 
1085  if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
1086  return 0;
1087 
1088  trim = avfilter_get_by_name(name);
1089  if (!trim) {
1090  av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
1091  "recording time.\n", name);
1092  return AVERROR_FILTER_NOT_FOUND;
1093  }
1094 
1095  ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
1096  if (!ctx)
1097  return AVERROR(ENOMEM);
1098 
1099  if (duration != INT64_MAX) {
1100  ret = av_opt_set_int(ctx, "durationi", duration,
1101  AV_OPT_SEARCH_CHILDREN);
1102  }
1103  if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
1104  ret = av_opt_set_int(ctx, "starti", start_time,
1105  AV_OPT_SEARCH_CHILDREN);
1106  }
1107  if (ret < 0) {
1108  av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
1109  return ret;
1110  }
1111 
1112  ret = avfilter_init_str(ctx, NULL);
1113  if (ret < 0)
1114  return ret;
1115 
1116  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1117  if (ret < 0)
1118  return ret;
1119 
1120  *last_filter = ctx;
1121  *pad_idx = 0;
1122  return 0;
1123 }
1124 
1125 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1126  const char *filter_name, const char *args)
1127 {
1128  AVFilterGraph *graph = (*last_filter)->graph;
1129  AVFilterContext *ctx;
1130  int ret;
1131 
1132  ret = avfilter_graph_create_filter(&ctx,
1133  avfilter_get_by_name(filter_name),
1134  filter_name, args, NULL, graph);
1135  if (ret < 0)
1136  return ret;
1137 
1138  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1139  if (ret < 0)
1140  return ret;
1141 
1142  *last_filter = ctx;
1143  *pad_idx = 0;
1144  return 0;
1145 }
1146 
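/* Build the output side of a video chain: buffersink, optional scaler to the
 * requested size, optional format filter restricting pixel formats, and a
 * trim filter for the output start/recording time. */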
1147 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1148 {
1149  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1150  OutputStream *ost = ofilter->ost;
1151  OutputFile *of = output_files[ost->file_index];
1152  AVFilterContext *last_filter = out->filter_ctx;
1153  AVBPrint bprint;
1154  int pad_idx = out->pad_idx;
1155  int ret;
1156  const char *pix_fmts;
1157  char name[255];
1158 
1159  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
1160  ret = avfilter_graph_create_filter(&ofp->filter,
1161  avfilter_get_by_name("buffersink"),
1162  name, NULL, NULL, fg->graph);
1163 
1164  if (ret < 0)
1165  return ret;
1166 
1167  if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
1168  char args[255];
1169  AVFilterContext *filter;
1170  const AVDictionaryEntry *e = NULL;
1171 
1172  snprintf(args, sizeof(args), "%d:%d",
1173  ofp->width, ofp->height);
1174 
1175  while ((e = av_dict_iterate(ost->sws_dict, e))) {
1176  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1177  }
1178 
1179  snprintf(name, sizeof(name), "scaler_out_%d_%d",
1180  ost->file_index, ost->index);
1181  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
1182  name, args, NULL, fg->graph)) < 0)
1183  return ret;
1184  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1185  return ret;
1186 
1187  last_filter = filter;
1188  pad_idx = 0;
1189  }
1190 
1191  av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
1192  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1193  if (ret < 0)
1194  return ret;
1195 
1196  if (pix_fmts) {
1197  AVFilterContext *filter;
1198 
1199  ret = avfilter_graph_create_filter(&filter,
1200  avfilter_get_by_name("format"),
1201  "format", pix_fmts, NULL, fg->graph);
1202  av_bprint_finalize(&bprint, NULL);
1203  if (ret < 0)
1204  return ret;
1205  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1206  return ret;
1207 
1208  last_filter = filter;
1209  pad_idx = 0;
1210  }
1211 
1212  snprintf(name, sizeof(name), "trim_out_%d_%d",
1213  ost->file_index, ost->index);
1214  ret = insert_trim(of->start_time, of->recording_time,
1215  &last_filter, &pad_idx, name);
1216  if (ret < 0)
1217  return ret;
1218 
1219 
1220  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1221  return ret;
1222 
1223  return 0;
1224 }
1225 
1226 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
1227 {
1228  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1229  OutputStream *ost = ofilter->ost;
1230  OutputFile *of = output_files[ost->file_index];
1231  AVFilterContext *last_filter = out->filter_ctx;
1232  int pad_idx = out->pad_idx;
1233  AVBPrint args;
1234  char name[255];
1235  int ret;
1236 
1237  snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
1238  ret = avfilter_graph_create_filter(&ofp->filter,
1239  avfilter_get_by_name("abuffersink"),
1240  name, NULL, NULL, fg->graph);
1241  if (ret < 0)
1242  return ret;
1243  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1244  return ret;
1245 
1246 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1247  AVFilterContext *filt_ctx; \
1248  \
1249  av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1250  "similarly to -af " filter_name "=%s.\n", arg); \
1251  \
1252  ret = avfilter_graph_create_filter(&filt_ctx, \
1253  avfilter_get_by_name(filter_name), \
1254  filter_name, arg, NULL, fg->graph); \
1255  if (ret < 0) \
1256  goto fail; \
1257  \
1258  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1259  if (ret < 0) \
1260  goto fail; \
1261  \
1262  last_filter = filt_ctx; \
1263  pad_idx = 0; \
1264 } while (0)
1265  av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
1266 #if FFMPEG_OPT_MAP_CHANNEL
1267  if (ost->audio_channels_mapped) {
1268  AVChannelLayout mapped_layout = { 0 };
1269  int i;
1270  av_channel_layout_default(&mapped_layout, ost->audio_channels_mapped);
1271  av_channel_layout_describe_bprint(&mapped_layout, &args);
1272  for (i = 0; i < ost->audio_channels_mapped; i++)
1273  if (ost->audio_channels_map[i] != -1)
1274  av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);
1275 
1276  AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
1277  av_bprint_clear(&args);
1278  }
1279 #endif
1280 
1281  choose_sample_fmts(ofp, &args);
1282  choose_sample_rates(ofp, &args);
1283  choose_channel_layouts(ofp, &args);
1284  if (!av_bprint_is_complete(&args)) {
1285  ret = AVERROR(ENOMEM);
1286  goto fail;
1287  }
1288  if (args.len) {
1289  AVFilterContext *format;
1290 
1291  snprintf(name, sizeof(name), "format_out_%d_%d",
1292  ost->file_index, ost->index);
1293  ret = avfilter_graph_create_filter(&format,
1294  avfilter_get_by_name("aformat"),
1295  name, args.str, NULL, fg->graph);
1296  if (ret < 0)
1297  goto fail;
1298 
1299  ret = avfilter_link(last_filter, pad_idx, format, 0);
1300  if (ret < 0)
1301  goto fail;
1302 
1303  last_filter = format;
1304  pad_idx = 0;
1305  }
1306 
1307  if (ost->apad && of->shortest) {
1308  int i;
1309 
1310  for (i = 0; i < of->nb_streams; i++)
1311  if (of->streams[i]->type == AVMEDIA_TYPE_VIDEO)
1312  break;
1313 
1314  if (i < of->nb_streams) {
1315  AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
1316  }
1317  }
1318 
1319  snprintf(name, sizeof(name), "trim for output stream %d:%d",
1320  ost->file_index, ost->index);
1321  ret = insert_trim(of->start_time, of->recording_time,
1322  &last_filter, &pad_idx, name);
1323  if (ret < 0)
1324  goto fail;
1325 
1326  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1327  goto fail;
1328 fail:
1329  av_bprint_finalize(&args, NULL);
1330 
1331  return ret;
1332 }
1333 
1334 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
1335  AVFilterInOut *out)
1336 {
1337  if (!ofilter->ost) {
1338  av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
1339  return AVERROR(EINVAL);
1340  }
1341 
1342  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1343  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
1344  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
1345  default: av_assert0(0); return 0;
1346  }
1347 }
1348 
1349 int check_filter_outputs(void)
1350 {
1351  int i;
1352  for (i = 0; i < nb_filtergraphs; i++) {
1353  int n;
1354  for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
1355  OutputFilter *output = filtergraphs[i]->outputs[n];
1356  if (!output->ost) {
1358  "Filter %s has an unconnected output\n", output->name);
1359  return AVERROR(EINVAL);
1360  }
1361  }
1362  }
1363 
1364  return 0;
1365 }
1366 
1367 static void sub2video_prepare(InputFilterPriv *ifp)
1368 {
1369  ifp->sub2video.last_pts = INT64_MIN;
1370  ifp->sub2video.end_pts = INT64_MIN;
1371 
1372  /* sub2video structure has been (re-)initialized.
1373  Mark it as such so that the system will be
1374  initialized with the first received heartbeat. */
1375  ifp->sub2video.initialize = 1;
1376 }
1377 
1378 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
1379  AVFilterInOut *in)
1380 {
1381  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1382 
1383  AVFilterContext *last_filter;
1384  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1385  const AVPixFmtDescriptor *desc;
1386  InputStream *ist = ifp->ist;
1387  InputFile *f = input_files[ist->file_index];
1388  AVRational fr = ist->framerate;
1389  AVRational sar;
1390  AVBPrint args;
1391  char name[255];
1392  int ret, pad_idx = 0;
1393  int64_t tsoffset = 0;
1394  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1395 
1396  if (!par)
1397  return AVERROR(ENOMEM);
1398  memset(par, 0, sizeof(*par));
1399  par->format = AV_PIX_FMT_NONE;
1400 
1401  if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1402  av_log(fg, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
1403  ret = AVERROR(EINVAL);
1404  goto fail;
1405  }
1406 
1407  if (!fr.num)
1408  fr = ist->framerate_guessed;
1409 
1410  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1411  sub2video_prepare(ifp);
1412 
1413  ifp->time_base = ist->framerate.num ? av_inv_q(ist->framerate) :
1414  ist->st->time_base;
1415 
1416  sar = ifp->sample_aspect_ratio;
1417  if(!sar.den)
1418  sar = (AVRational){0,1};
1419  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
1420  av_bprintf(&args,
1421  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1422  "pixel_aspect=%d/%d",
1423  ifp->width, ifp->height, ifp->format,
1424  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den);
1425  if (fr.num && fr.den)
1426  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1427  snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
1428  ist->file_index, ist->index);
1429 
1430 
1431  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1432  args.str, NULL, fg->graph)) < 0)
1433  goto fail;
1434  par->hw_frames_ctx = ifp->hw_frames_ctx;
1435  ret = av_buffersrc_parameters_set(ifp->filter, par);
1436  if (ret < 0)
1437  goto fail;
1438  av_freep(&par);
1439  last_filter = ifp->filter;
1440 
1441  desc = av_pix_fmt_desc_get(ifp->format);
1442  av_assert0(desc);
1443 
1444  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1445  if (ist->autorotate && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1446  const AVPacketSideData *sd = NULL;
1447  int32_t *displaymatrix = ifp->displaymatrix;
1448  double theta;
1449 
1450  if (!ifp->displaymatrix_present)
1451  sd = av_packet_side_data_get(ist->st->codecpar->coded_side_data,
1452  ist->st->codecpar->nb_coded_side_data,
1453  AV_PKT_DATA_DISPLAYMATRIX);
1454  if (sd)
1455  displaymatrix = (int32_t *)sd->data;
1456  theta = get_rotation(displaymatrix);
1457 
1458  if (fabs(theta - 90) < 1.0) {
1459  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1460  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1461  } else if (fabs(theta - 180) < 1.0) {
1462  if (displaymatrix[0] < 0) {
1463  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1464  if (ret < 0)
1465  return ret;
1466  }
1467  if (displaymatrix[4] < 0) {
1468  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1469  }
1470  } else if (fabs(theta - 270) < 1.0) {
1471  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1472  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1473  } else if (fabs(theta) > 1.0) {
1474  char rotate_buf[64];
1475  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1476  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1477  } else if (fabs(theta) < 1.0) {
1478  if (displaymatrix && displaymatrix[4] < 0) {
1479  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1480  }
1481  }
1482  if (ret < 0)
1483  return ret;
1484  }
1485 
1486  snprintf(name, sizeof(name), "trim_in_%d_%d",
1487  ist->file_index, ist->index);
1488  if (copy_ts) {
1489  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
1490  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
1491  tsoffset += f->ctx->start_time;
1492  }
1493  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
1494  AV_NOPTS_VALUE : tsoffset, f->recording_time,
1495  &last_filter, &pad_idx, name);
1496  if (ret < 0)
1497  return ret;
1498 
1499  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1500  return ret;
1501  return 0;
1502 fail:
1503  av_freep(&par);
1504 
1505  return ret;
1506 }
1507 
1508 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
1509  AVFilterInOut *in)
1510 {
1511  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1512  AVFilterContext *last_filter;
1513  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1514  InputStream *ist = ifp->ist;
1515  InputFile *f = input_files[ist->file_index];
1516  AVBPrint args;
1517  char name[255];
1518  int ret, pad_idx = 0;
1519  int64_t tsoffset = 0;
1520 
1521  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
1522  av_log(fg, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
1523  return AVERROR(EINVAL);
1524  }
1525 
1526  ifp->time_base = (AVRational){ 1, ifp->sample_rate };
1527 
1528  av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
1529  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1530  ifp->time_base.num, ifp->time_base.den,
1531  ifp->sample_rate,
1532  av_get_sample_fmt_name(ifp->format));
1533  if (av_channel_layout_check(&ifp->ch_layout) &&
1534  ifp->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
1535  av_bprintf(&args, ":channel_layout=");
1536  av_channel_layout_describe_bprint(&ifp->ch_layout, &args);
1537  } else
1538  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1539  snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
1540  ist->file_index, ist->index);
1541 
1542  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1543  name, args.str, NULL,
1544  fg->graph)) < 0)
1545  return ret;
1546  last_filter = ifp->filter;
1547 
1548  snprintf(name, sizeof(name), "trim for input stream %d:%d",
1549  ist->file_index, ist->index);
1550  if (copy_ts) {
1551  tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
1552  if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
1553  tsoffset += f->ctx->start_time;
1554  }
1555  ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
1556  AV_NOPTS_VALUE : tsoffset, f->recording_time,
1557  &last_filter, &pad_idx, name);
1558  if (ret < 0)
1559  return ret;
1560 
1561  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1562  return ret;
1563 
1564  return 0;
1565 }
1566 
1567 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
1568  AVFilterInOut *in)
1569 {
1570  switch (ifp_from_ifilter(ifilter)->type) {
1571  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
1572  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
1573  default: av_assert0(0); return 0;
1574  }
1575 }
1576 
1577 static void cleanup_filtergraph(FilterGraph *fg)
1578 {
1579  int i;
1580  for (i = 0; i < fg->nb_outputs; i++)
1581  ofp_from_ofilter(fg->outputs[i])->filter = NULL;
1582  for (i = 0; i < fg->nb_inputs; i++)
1583  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1584  avfilter_graph_free(&fg->graph);
1585 }
1586 
1587 static int filter_is_buffersrc(const AVFilterContext *f)
1588 {
1589  return f->nb_inputs == 0 &&
1590  (!strcmp(f->filter->name, "buffer") ||
1591  !strcmp(f->filter->name, "abuffer"));
1592 }
1593 
1594 static int graph_is_meta(AVFilterGraph *graph)
1595 {
1596  for (unsigned i = 0; i < graph->nb_filters; i++) {
1597  const AVFilterContext *f = graph->filters[i];
1598 
1599  /* in addition to filters flagged as meta, also
1600  * disregard sinks and buffersources (but not other sources,
1601  * since they introduce data we are not aware of)
1602  */
1603  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1604  f->nb_outputs == 0 ||
1605  filter_is_buffersrc(f)))
1606  return 0;
1607  }
1608  return 1;
1609 }
1610 
1611 static int configure_filtergraph(FilterGraph *fg)
1612 {
1613  FilterGraphPriv *fgp = fgp_from_fg(fg);
1614  AVBufferRef *hw_device;
1615  AVFilterInOut *inputs, *outputs, *cur;
1616  int ret, i, simple = filtergraph_is_simple(fg);
1617  const char *graph_desc = fgp->graph_desc;
1618 
1619  cleanup_filtergraph(fg);
1620  if (!(fg->graph = avfilter_graph_alloc()))
1621  return AVERROR(ENOMEM);
1622 
1623  if (simple) {
1624  OutputStream *ost = fg->outputs[0]->ost;
1625 
1626  if (filter_nbthreads) {
1627  ret = av_opt_set(fg->graph, "threads", filter_nbthreads, 0);
1628  if (ret < 0)
1629  goto fail;
1630  } else {
1631  const AVDictionaryEntry *e = NULL;
1632  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1633  if (e)
1634  av_opt_set(fg->graph, "threads", e->value, 0);
1635  }
1636 
1637  if (av_dict_count(ost->sws_dict)) {
1638  ret = av_dict_get_string(ost->sws_dict,
1639  &fg->graph->scale_sws_opts,
1640  '=', ':');
1641  if (ret < 0)
1642  goto fail;
1643  }
1644 
1645  if (av_dict_count(ost->swr_opts)) {
1646  char *args;
1647  ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
1648  if (ret < 0)
1649  goto fail;
1650  av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
1651  av_free(args);
1652  }
1653  } else {
1654  fg->graph->nb_threads = filter_complex_nbthreads;
1655  }
1656 
1657  hw_device = hw_device_for_filter();
1658 
1659  if ((ret = graph_parse(fg->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1660  goto fail;
1661 
1662  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1663  if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
1664  avfilter_inout_free(&inputs);
1665  avfilter_inout_free(&outputs);
1666  goto fail;
1667  }
1668  avfilter_inout_free(&inputs);
1669 
1670  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1671  ret = configure_output_filter(fg, fg->outputs[i], cur);
1672  if (ret < 0) {
1673  avfilter_inout_free(&outputs);
1674  goto fail;
1675  }
1676  }
1677  avfilter_inout_free(&outputs);
1678 
1679  if (fgp->disable_conversions)
1680  avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
1681  if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1682  goto fail;
1683 
1684  fgp->is_meta = graph_is_meta(fg->graph);
1685 
1686  /* limit the lists of allowed formats to the ones selected, to
1687  * make sure they stay the same if the filtergraph is reconfigured later */
1688  for (i = 0; i < fg->nb_outputs; i++) {
1689  OutputFilter *ofilter = fg->outputs[i];
1690  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1691  AVFilterContext *sink = ofp->filter;
1692 
1693  ofp->format = av_buffersink_get_format(sink);
1694 
1695  ofp->width = av_buffersink_get_w(sink);
1696  ofp->height = av_buffersink_get_h(sink);
1697 
1698  // If the timing parameters are not locked yet, get the tentative values
1699  // here but don't lock them. They will only be used if no output frames
1700  // are ever produced.
1701  if (!ofp->tb_out_locked) {
1702  AVRational fr = av_buffersink_get_frame_rate(sink);
1703  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1704  fr.num > 0 && fr.den > 0)
1705  ofp->fps.framerate = fr;
1706  ofp->tb_out = av_buffersink_get_time_base(sink);
1707  }
1708  ofp->sample_aspect_ratio = av_buffersink_get_sample_aspect_ratio(sink);
1709 
1710  ofp->sample_rate = av_buffersink_get_sample_rate(sink);
1711  av_channel_layout_uninit(&ofp->ch_layout);
1712  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1713  if (ret < 0)
1714  goto fail;
1715  }
1716 
1717  for (i = 0; i < fg->nb_inputs; i++) {
1718  InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
1719  AVFrame *tmp;
1720  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1721  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1722  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)tmp->buf[0]->data);
1723  } else {
1724  ret = av_buffersrc_add_frame(ifp->filter, tmp);
1725  }
1726  av_frame_free(&tmp);
1727  if (ret < 0)
1728  goto fail;
1729  }
1730  }
1731 
1732  /* send the EOFs for the finished inputs */
1733  for (i = 0; i < fg->nb_inputs; i++) {
1734  InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
1735  if (ifp->eof) {
1736  ret = av_buffersrc_add_frame(ifp->filter, NULL);
1737  if (ret < 0)
1738  goto fail;
1739  }
1740  }
1741 
1742  return 0;
1743 
1744 fail:
1745  cleanup_filtergraph(fg);
1746  return ret;
1747 }
1748 
1749 int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
1750 {
1751  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1752 
1753  if (dec->codec_type == AVMEDIA_TYPE_VIDEO) {
1754  ifp->fallback.format = dec->pix_fmt;
1755  ifp->fallback.width = dec->width;
1756  ifp->fallback.height = dec->height;
1757  ifp->fallback.sample_aspect_ratio = dec->sample_aspect_ratio;
1758  } else if (dec->codec_type == AVMEDIA_TYPE_AUDIO) {
1759  int ret;
1760 
1761  ifp->fallback.format = dec->sample_fmt;
1762  ifp->fallback.sample_rate = dec->sample_rate;
1763 
1764  ret = av_channel_layout_copy(&ifp->fallback.ch_layout, &dec->ch_layout);
1765  if (ret < 0)
1766  return ret;
1767  } else {
1768  // for subtitles (i.e. sub2video) we set the actual parameters,
1769  // rather than just fallback
1770  ifp->width = ifp->ist->sub2video.w;
1771  ifp->height = ifp->ist->sub2video.h;
1772 
1773  /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
1774  palettes for all rectangles are identical or compatible */
1775  ifp->format = AV_PIX_FMT_RGB32;
1776 
1777  av_log(NULL, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n", ifp->width, ifp->height);
1778  }
1779 
1780  return 0;
1781 }
1782 
1783 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
1784 {
1785  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1786  AVFrameSideData *sd;
1787  int ret;
1788 
1789  ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
1790  if (ret < 0)
1791  return ret;
1792 
1793  ifp->format = frame->format;
1794 
1795  ifp->width = frame->width;
1796  ifp->height = frame->height;
1797  ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
1798 
1799  ifp->sample_rate = frame->sample_rate;
1800  ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
1801  if (ret < 0)
1802  return ret;
1803 
1804  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1805  if (sd)
1806  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1807  ifp->displaymatrix_present = !!sd;
1808 
1809  return 0;
1810 }
1811 
1812 int filtergraph_is_simple(const FilterGraph *fg)
1813 {
1814  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1815  return fgp->is_simple;
1816 }
1817 
1818 void fg_send_command(FilterGraph *fg, double time, const char *target,
1819  const char *command, const char *arg, int all_filters)
1820 {
1821  int ret;
1822 
1823  if (!fg->graph)
1824  return;
1825 
1826  if (time < 0) {
1827  char response[4096];
1828  ret = avfilter_graph_send_command(fg->graph, target, command, arg,
1829  response, sizeof(response),
1830  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1831  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1832  fg->index, ret, response);
1833  } else if (!all_filters) {
1834  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1835  } else {
1836  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
1837  if (ret < 0)
1838  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1839  }
1840 }
1841 
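/* Decide the timebase (and for video, the target framerate) used for frames
 * sent downstream from this output: honour -enc_time_base, fall back to the
 * sink framerate or 1/sample_rate, clamp CFR framerates to the encoder's
 * supported list and limits, and lock the result afterwards. */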
1842 static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
1843 {
1844  OutputFilter *ofilter = &ofp->ofilter;
1845  FPSConvContext *fps = &ofp->fps;
1846  AVRational tb = (AVRational){ 0, 0 };
1847  AVRational fr;
1848  FrameData *fd;
1849 
1850  fd = frame_data(frame);
1851 
1852  // apply -enc_time_base
1853  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
1854  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
1855  av_log(ofilter->ost, AV_LOG_ERROR,
1856  "Demuxing timebase not available - cannot use it for encoding\n");
1857  return AVERROR(EINVAL);
1858  }
1859 
1860  switch (ofp->enc_timebase.num) {
1861  case 0: break;
1862  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
1863  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
1864  default: tb = ofp->enc_timebase; break;
1865  }
1866 
1867  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
1868  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
1869  goto finish;
1870  }
1871 
1872  fr = fps->framerate;
1873  if (!fr.num) {
1874  AVRational fr_sink = av_buffersink_get_frame_rate(ofp->filter);
1875  if (fr_sink.num > 0 && fr_sink.den > 0)
1876  fr = fr_sink;
1877  }
1878 
1879  if (ofilter->ost->is_cfr) {
1880  if (!fr.num && !fps->framerate_max.num) {
1881  fr = (AVRational){25, 1};
1882  av_log(ofilter->ost, AV_LOG_WARNING,
1883  "No information "
1884  "about the input framerate is available. Falling "
1885  "back to a default value of 25fps. Use the -r option "
1886  "if you want a different framerate.\n");
1887  }
1888 
1889  if (fps->framerate_max.num &&
1890  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
1891  !fr.den))
1892  fr = fps->framerate_max;
1893  }
1894 
1895  if (fr.num > 0) {
1896  if (fps->framerate_supported) {
1897  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
1898  fr = fps->framerate_supported[idx];
1899  }
1900  if (fps->framerate_clip) {
1901  av_reduce(&fr.num, &fr.den,
1902  fr.num, fr.den, fps->framerate_clip);
1903  }
1904  }
1905 
1906  if (!(tb.num > 0 && tb.den > 0))
1907  tb = av_inv_q(fr);
1908  if (!(tb.num > 0 && tb.den > 0))
1909  tb = frame->time_base;
1910 
1911 finish:
1912  ofp->tb_out = tb;
1913  fps->framerate = fr;
1914  ofp->tb_out_locked = 1;
1915 
1916  return 0;
1917 }
1918 
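/* The extra_bits shift below keeps sub-integer precision while rescaling:
 * e.g. with tb_dst = 1/25 (tb.den = 25, av_log2(25) = 4) extra_bits becomes
 * min(29 - 4, 16) = 16, so the pts is first rescaled into a 25<<16 Hz base
 * and then divided back by 1<<16 as a double, preserving the fractional part
 * that plain integer rescaling to 1/25 would round away (the numbers here
 * are an illustrative example). */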
1919 static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst,
1920  int64_t start_time)
1921 {
1922  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
1923 
1924  AVRational tb = tb_dst;
1925  AVRational filter_tb = frame->time_base;
1926  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1927 
1928  if (frame->pts == AV_NOPTS_VALUE)
1929  goto early_exit;
1930 
1931  tb.den <<= extra_bits;
1932  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
1933  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1934  float_pts /= 1 << extra_bits;
1935  // when float_pts is not exactly an integer,
1936  // avoid exact midpoints to reduce the chance of rounding differences, this
1937  // can be removed in case the fps code is changed to work with integers
1938  if (float_pts != llrint(float_pts))
1939  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1940 
1941  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
1942  av_rescale_q(start_time, AV_TIME_BASE_Q, tb_dst);
1943  frame->time_base = tb_dst;
1944 
1945 early_exit:
1946 
1947  if (debug_ts) {
1948  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1949  frame ? av_ts2str(frame->pts) : "NULL",
1950  av_ts2timestr(frame->pts, &tb_dst),
1951  float_pts, tb_dst.num, tb_dst.den);
1952  }
1953 
1954  return float_pts;
1955 }
1956 
1957 /* Convert frame timestamps to the encoder timebase and decide how many times
1958  * should this (and possibly previous) frame be repeated in order to conform to
1959  * desired target framerate (if any).
1960  */
1961 static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,
1962  int64_t *nb_frames, int64_t *nb_frames_prev)
1963 {
1964  OutputFilter *ofilter = &ofp->ofilter;
1965  OutputStream *ost = ofilter->ost;
1966  FPSConvContext *fps = &ofp->fps;
1967  double delta0, delta, sync_ipts, duration;
1968 
1969  if (!frame) {
1970  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
1971  fps->frames_prev_hist[1],
1972  fps->frames_prev_hist[2]);
1973 
1974  if (!*nb_frames && fps->last_dropped) {
1975  ofilter->nb_frames_drop++;
1976  fps->last_dropped++;
1977  }
1978 
1979  goto finish;
1980  }
1981 
1982  duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);
1983 
1984  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
1985  /* delta0 is the "drift" between the input frame and
1986  * where it would fall in the output. */
1987  delta0 = sync_ipts - ofp->next_pts;
1988  delta = delta0 + duration;
1989 
1990  // tracks the number of times the PREVIOUS frame should be duplicated,
1991  // mostly for variable framerate (VFR)
1992  *nb_frames_prev = 0;
1993  /* by default, we output a single frame */
1994  *nb_frames = 1;
1995 
1996  if (delta0 < 0 &&
1997  delta > 0 &&
1998  ost->vsync_method != VSYNC_PASSTHROUGH &&
1999  ost->vsync_method != VSYNC_DROP) {
2000  if (delta0 < -0.6) {
2001  av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2002  } else
2003  av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2004  sync_ipts = ofp->next_pts;
2005  duration += delta0;
2006  delta0 = 0;
2007  }
2008 
2009  switch (ost->vsync_method) {
2010  case VSYNC_VSCFR:
2011  if (fps->frame_number == 0 && delta0 >= 0.5) {
2012  av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2013  delta = duration;
2014  delta0 = 0;
2015  ofp->next_pts = llrint(sync_ipts);
2016  }
2017  case VSYNC_CFR:
2018  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2019  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2020  *nb_frames = 0;
2021  } else if (delta < -1.1)
2022  *nb_frames = 0;
2023  else if (delta > 1.1) {
2024  *nb_frames = llrintf(delta);
2025  if (delta0 > 1.1)
2026  *nb_frames_prev = llrintf(delta0 - 0.6);
2027  }
2028  frame->duration = 1;
2029  break;
2030  case VSYNC_VFR:
2031  if (delta <= -0.6)
2032  *nb_frames = 0;
2033  else if (delta > 0.6)
2034  ofp->next_pts = llrint(sync_ipts);
2035  frame->duration = duration;
2036  break;
2037  case VSYNC_DROP:
2038  case VSYNC_PASSTHROUGH:
2039  ofp->next_pts = llrint(sync_ipts);
2040  frame->duration = duration;
2041  break;
2042  default:
2043  av_assert0(0);
2044  }
2045 
2046 finish:
2047  memmove(fps->frames_prev_hist + 1,
2048  fps->frames_prev_hist,
2049  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2050  fps->frames_prev_hist[0] = *nb_frames_prev;
2051 
2052  if (*nb_frames_prev == 0 && fps->last_dropped) {
2053  ofilter->nb_frames_drop++;
2055  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2056  fps->frame_number, fps->last_frame->pts);
2057  }
2058  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2059  if (*nb_frames > dts_error_threshold * 30) {
2060  av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2061  ofilter->nb_frames_drop++;
2062  *nb_frames = 0;
2063  return;
2064  }
2065  ofilter->nb_frames_dup += *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev);
2066  av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2067  if (ofilter->nb_frames_dup > fps->dup_warning) {
2068  av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2069  fps->dup_warning *= 10;
2070  }
2071  }
2072 
2073  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2074  fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
2075 }
2076 
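/* Send one filtered frame to its encoder, duplicating or dropping it as
 * dictated by the video sync decision, and remember the last video frame so
 * it can be repeated when the sync code asks for it. */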
2077 static int fg_output_frame(OutputFilterPriv *ofp, AVFrame *frame)
2078 {
2079  FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
2080  OutputStream *ost = ofp->ofilter.ost;
2081  AVFrame *frame_prev = ofp->fps.last_frame;
2082  enum AVMediaType type = ofp->ofilter.type;
2083 
2084  int64_t nb_frames = 1, nb_frames_prev = 0;
2085 
2086  if (type == AVMEDIA_TYPE_VIDEO)
2087  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2088 
2089  for (int64_t i = 0; i < nb_frames; i++) {
2090  AVFrame *frame_out;
2091  int ret;
2092 
2093  if (type == AVMEDIA_TYPE_VIDEO) {
2094  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2095  frame_prev : frame;
2096  if (!frame_in)
2097  break;
2098 
2099  frame_out = fgp->frame_enc;
2100  ret = av_frame_ref(frame_out, frame_in);
2101  if (ret < 0)
2102  return ret;
2103 
2104  frame_out->pts = ofp->next_pts;
2105 
2106  if (ofp->fps.dropped_keyframe) {
2107  frame_out->flags |= AV_FRAME_FLAG_KEY;
2108  ofp->fps.dropped_keyframe = 0;
2109  }
2110  } else {
2111  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2112  av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
2113  av_rescale_q(ofp->ts_offset, AV_TIME_BASE_Q, ofp->tb_out);
2114 
2115  frame->time_base = ofp->tb_out;
2116  frame->duration = av_rescale_q(frame->nb_samples,
2117  (AVRational){ 1, frame->sample_rate },
2118  ofp->tb_out);
2119 
2120  ofp->next_pts = frame->pts + frame->duration;
2121 
2122  frame_out = frame;
2123  }
2124 
2125  ret = enc_frame(ost, frame_out);
2126  av_frame_unref(frame_out);
2127  if (ret < 0)
2128  return ret;
2129 
2130  if (type == AVMEDIA_TYPE_VIDEO) {
2131  ofp->fps.frame_number++;
2132  ofp->next_pts++;
2133 
2134  if (i == nb_frames_prev && frame)
2136  }
2137 
2138  ofp->got_frame = 1;
2139  }
2140 
2141  if (frame && frame_prev) {
2142  av_frame_unref(frame_prev);
2143  av_frame_move_ref(frame_prev, frame);
2144  }
2145 
2146  return 0;
2147 }
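For audio, the else-branch above stamps the frame with the output time base and derives its duration from the sample count before advancing next_pts. A self-contained sketch of that arithmetic, assuming the caller supplies the output time base and a running pts counter; stamp_audio_frame is an illustrative name:

#include <libavutil/avutil.h>
#include <libavutil/frame.h>
#include <libavutil/mathematics.h>

/* Sketch: timestamp an audio frame in the encoder time base tb_out and
 * advance the running counter by the frame's duration in that time base. */
static void stamp_audio_frame(AVFrame *frame, AVRational tb_out, int64_t *next_pts)
{
    if (frame->pts == AV_NOPTS_VALUE)
        frame->pts = *next_pts;              /* no timestamp: continue the counter */

    frame->time_base = tb_out;
    frame->duration  = av_rescale_q(frame->nb_samples,
                                    (AVRational){ 1, frame->sample_rate },
                                    tb_out); /* samples -> tb_out units */
    *next_pts = frame->pts + frame->duration;
}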
2148 
2149 static int fg_output_step(OutputFilterPriv *ofp, int flush)
2150 {
2152  OutputStream *ost = ofp->ofilter.ost;
2153  AVFrame *frame = fgp->frame;
2154  AVFilterContext *filter = ofp->filter;
2155  FrameData *fd;
2156  int ret;
2157 
2160  if (flush && ret == AVERROR_EOF && ofp->got_frame &&
2161  ost->type == AVMEDIA_TYPE_VIDEO) {
2162  ret = fg_output_frame(ofp, NULL);
2163  return (ret < 0) ? ret : 1;
2164  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2165  return 1;
2166  } else if (ret < 0) {
2167  av_log(fgp, AV_LOG_WARNING,
2168  "Error in retrieving a frame from the filtergraph: %s\n",
2169  av_err2str(ret));
2170  return ret;
2171  }
2172  if (ost->finished) {
2174  return 0;
2175  }
2176 
2178 
2179  if (frame->pts != AV_NOPTS_VALUE) {
2180  ost->filter->last_pts = av_rescale_q(frame->pts, frame->time_base,
2181  AV_TIME_BASE_Q);
2182 
2183  if (debug_ts)
2184  av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2187  }
2188 
2189  // Choose the output timebase the first time we get a frame.
2190  if (!ofp->tb_out_locked) {
2191  ret = choose_out_timebase(ofp, frame);
2192  if (ret < 0) {
2193  av_log(ost, AV_LOG_ERROR, "Could not choose an output time base\n");
2195  return ret;
2196  }
2197  }
2198 
2199  fd = frame_data(frame);
2200  if (!fd) {
2202  return AVERROR(ENOMEM);
2203  }
2204 
2205  // only use bits_per_raw_sample passed through from the decoder
2206  // if the filtergraph did not touch the frame data
2207  if (!fgp->is_meta)
2208  fd->bits_per_raw_sample = 0;
2209 
2210  if (ost->type == AVMEDIA_TYPE_VIDEO) {
2211  if (!frame->duration) {
2213  if (fr.num > 0 && fr.den > 0)
2215  }
2216 
2217  fd->frame_rate_filter = ofp->fps.framerate;
2218  }
2219 
2220  ret = fg_output_frame(ofp, frame);
2222  if (ret < 0)
2223  return ret;
2224 
2225  return 0;
2226 }
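fg_output_step() above retrieves frames from the output buffersink and treats EAGAIN/EOF as "nothing more to do" rather than as errors. A reduced sketch of that drain pattern using av_buffersink_get_frame_flags() with AV_BUFFERSINK_FLAG_NO_REQUEST, which only returns frames the graph has already produced; drain_sink is an illustrative name and the encoder hand-off is left as a comment:

#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

/* Sketch: pull every frame already buffered in one sink without asking the
 * graph to produce more input. */
static int drain_sink(AVFilterContext *sink, AVFrame *frame)
{
    for (;;) {
        int ret = av_buffersink_get_frame_flags(sink, frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          /* nothing more buffered right now */
        if (ret < 0)
            return ret;        /* real error from the filtergraph */

        /* ... hand the frame to the encoder here ... */
        av_frame_unref(frame);
    }
}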
2227 
2228 int reap_filters(FilterGraph *fg, int flush)
2229 {
2230  if (!fg->graph)
2231  return 0;
2232 
2233  /* Reap all buffers present in the buffer sinks */
2234  for (int i = 0; i < fg->nb_outputs; i++) {
2235  OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[i]);
2236  int ret = 0;
2237 
2238  while (!ret) {
2239  ret = fg_output_step(ofp, flush);
2240  if (ret < 0)
2241  return ret;
2242  }
2243  }
2244 
2245  return 0;
2246 }
2247 
2248 void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
2249 {
2250  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2251  int64_t pts2;
2252 
2253  if (!ifilter->graph->graph)
2254  return;
2255 
2256  /* subtitles seem to be usually muxed ahead of other streams;
2257  if not, subtracting a larger time here is necessary */
2258  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2259 
2260  /* do not send the heartbeat frame if the subtitle is already ahead */
2261  if (pts2 <= ifp->sub2video.last_pts)
2262  return;
2263 
2264  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2265  /* if we have hit the end of the current displayed subpicture,
2266  or if we need to initialize the system, update the
2267  overlayed subpicture and its start/end times */
2268  sub2video_update(ifp, pts2 + 1, NULL);
2269 
2271  sub2video_push_ref(ifp, pts2);
2272 }
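The heartbeat above rescales the driving stream's timestamp into the sub2video input's time base, subtracting one tick so a subtitle starting exactly at that time still counts as ahead, and refreshes the overlaid subpicture once the current one has expired. A minimal sketch of that timing test, assuming the caller tracks last_pts/end_pts itself; heartbeat_due is an illustrative name:

#include <stdint.h>
#include <libavutil/mathematics.h>

/* Sketch: decide whether a sub2video heartbeat at pts (in time base tb)
 * should refresh the overlaid subpicture kept in time base sub_tb. */
static int heartbeat_due(int64_t pts, AVRational tb, AVRational sub_tb,
                         int64_t last_pts, int64_t end_pts)
{
    int64_t pts2 = av_rescale_q(pts, tb, sub_tb) - 1;

    if (pts2 <= last_pts)
        return 0;              /* the overlay is already ahead: nothing to do */
    return pts2 >= end_pts;    /* current subpicture has expired: redraw */
}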
2273 
2274 int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame)
2275 {
2276  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2277  int ret;
2278 
2279  if (ifilter->graph->graph) {
2280  if (!frame) {
2281  if (ifp->sub2video.end_pts < INT64_MAX)
2282  sub2video_update(ifp, INT64_MAX, NULL);
2283 
2284  return av_buffersrc_add_frame(ifp->filter, NULL);
2285  }
2286 
2287  ifp->width = frame->width ? frame->width : ifp->width;
2288  ifp->height = frame->height ? frame->height : ifp->height;
2289 
2290  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2291  } else if (frame) {
2293 
2294  if (!tmp)
2295  return AVERROR(ENOMEM);
2296 
2297  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2298  if (ret < 0) {
2299  av_frame_free(&tmp);
2300  return ret;
2301  }
2302  }
2303 
2304  return 0;
2305 }
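When the filtergraph is not configured yet, ifilter_sub2video() (like ifilter_send_frame() further down) keeps a clone of the incoming frame in an AVFifo of AVFrame pointers so it can be replayed once the graph exists. A standalone sketch of that queueing step; queue_frame_copy is an illustrative name:

#include <libavutil/error.h>
#include <libavutil/fifo.h>
#include <libavutil/frame.h>

/* Sketch: buffer a reference-counted copy of a frame in a FIFO of AVFrame*. */
static int queue_frame_copy(AVFifo *queue, const AVFrame *frame)
{
    AVFrame *tmp = av_frame_clone(frame);  /* new reference, no pixel copy */
    int ret;

    if (!tmp)
        return AVERROR(ENOMEM);

    ret = av_fifo_write(queue, &tmp, 1);   /* the FIFO stores AVFrame* elements */
    if (ret < 0)
        av_frame_free(&tmp);
    return ret;
}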
2306 
2307 int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb)
2308 {
2309  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2310  int ret;
2311 
2312  ifp->eof = 1;
2313 
2314  if (ifp->filter) {
2315  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2317 
2319  if (ret < 0)
2320  return ret;
2321  } else {
2322  if (ifp->format < 0) {
2323  // the filtergraph was never configured, use the fallback parameters
2324  ifp->format = ifp->fallback.format;
2325  ifp->sample_rate = ifp->fallback.sample_rate;
2326  ifp->width = ifp->fallback.width;
2327  ifp->height = ifp->fallback.height;
2329 
2331  &ifp->fallback.ch_layout);
2332  if (ret < 0)
2333  return ret;
2334 
2335  if (ifilter_has_all_input_formats(ifilter->graph)) {
2336  ret = configure_filtergraph(ifilter->graph);
2337  if (ret < 0) {
2338  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2339  return ret;
2340  }
2341  }
2342  }
2343 
2344  if (ifp->format < 0) {
2346  "Cannot determine format of input stream %d:%d after EOF\n",
2347  ifp->ist->file_index, ifp->ist->index);
2348  return AVERROR_INVALIDDATA;
2349  }
2350  }
2351 
2352  return 0;
2353 }
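On an already-configured input, ifilter_send_eof() converts the EOF timestamp into the filter's time base and closes the buffer source so downstream filters can flush. A sketch of just that hand-off; close_input is an illustrative name, and passing AV_BUFFERSRC_FLAG_PUSH is an assumption here rather than something visible in the listing above:

#include <libavfilter/buffersrc.h>
#include <libavutil/mathematics.h>

/* Sketch: signal EOF on a buffer source at a timestamp rescaled from the
 * stream time base tb_src into the filter time base tb_filter. */
static int close_input(AVFilterContext *buffersrc, int64_t pts,
                       AVRational tb_src, AVRational tb_filter)
{
    pts = av_rescale_q_rnd(pts, tb_src, tb_filter,
                           AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    return av_buffersrc_close(buffersrc, pts, AV_BUFFERSRC_FLAG_PUSH);
}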
2354 
2355 int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
2356 {
2357  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2358  FilterGraph *fg = ifilter->graph;
2359  AVFrameSideData *sd;
2360  int need_reinit, ret;
2361 
2362  /* determine if the parameters for this input changed */
2363  need_reinit = ifp->format != frame->format;
2364 
2365  switch (ifp->type) {
2366  case AVMEDIA_TYPE_AUDIO:
2367  need_reinit |= ifp->sample_rate != frame->sample_rate ||
2369  break;
2370  case AVMEDIA_TYPE_VIDEO:
2371  need_reinit |= ifp->width != frame->width ||
2372  ifp->height != frame->height;
2373  break;
2374  }
2375 
2376  if (!ifp->ist->reinit_filters && fg->graph)
2377  need_reinit = 0;
2378 
2379  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2380  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2381  need_reinit = 1;
2382 
2384  if (!ifp->displaymatrix_present ||
2385  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2386  need_reinit = 1;
2387  } else if (ifp->displaymatrix_present)
2388  need_reinit = 1;
2389 
2390  if (need_reinit) {
2392  if (ret < 0)
2393  return ret;
2394  }
2395 
2396  /* (re)init the graph if possible, otherwise buffer the frame and return */
2397  if (need_reinit || !fg->graph) {
2398  if (!ifilter_has_all_input_formats(fg)) {
2400  if (!tmp)
2401  return AVERROR(ENOMEM);
2402 
2403  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2404  if (ret < 0)
2405  av_frame_free(&tmp);
2406 
2407  return ret;
2408  }
2409 
2410  ret = reap_filters(fg, 0);
2411  if (ret < 0 && ret != AVERROR_EOF) {
2412  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2413  return ret;
2414  }
2415 
2416  ret = configure_filtergraph(fg);
2417  if (ret < 0) {
2418  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2419  return ret;
2420  }
2421  }
2422 
2423  if (keep_reference) {
2424  ret = av_frame_ref(ifp->frame, frame);
2425  if (ret < 0)
2426  return ret;
2427  } else
2428  av_frame_move_ref(ifp->frame, frame);
2429  frame = ifp->frame;
2430 
2433  frame->time_base = ifp->time_base;
2434 #if LIBAVUTIL_VERSION_MAJOR < 59
2437  )
2438 #endif
2439 
2442  if (ret < 0) {
2444  if (ret != AVERROR_EOF)
2445  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2446  return ret;
2447  }
2448 
2449  return 0;
2450 }
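After the graph is (re)configured, ifilter_send_frame() pushes the frame into the buffer source. The sketch below shows the same hand-off, but lets buffersrc keep the reference via AV_BUFFERSRC_FLAG_KEEP_REF instead of the explicit av_frame_ref()/av_frame_move_ref() into ifp->frame used above; push_frame is an illustrative name:

#include <libavfilter/buffersrc.h>
#include <libavutil/frame.h>

/* Sketch: feed one frame to a buffer source, optionally preserving the
 * caller's reference. */
static int push_frame(AVFilterContext *buffersrc, AVFrame *frame,
                      int keep_reference)
{
    int flags = AV_BUFFERSRC_FLAG_PUSH |
                (keep_reference ? AV_BUFFERSRC_FLAG_KEEP_REF : 0);

    /* With KEEP_REF the source takes its own reference and leaves the
     * caller's frame untouched; without it, the frame is moved. */
    return av_buffersrc_add_frame_flags(buffersrc, frame, flags);
}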
2451 
2452 int fg_transcode_step(FilterGraph *graph, InputStream **best_ist)
2453 {
2454  FilterGraphPriv *fgp = fgp_from_fg(graph);
2455  int i, ret;
2456  int nb_requests, nb_requests_max = 0;
2457  InputStream *ist;
2458 
2459  if (!graph->graph) {
2460  for (int i = 0; i < graph->nb_inputs; i++) {
2461  InputFilter *ifilter = graph->inputs[i];
2462  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2463  if (ifp->format < 0 && !ifp->eof) {
2464  *best_ist = ifp->ist;
2465  return 0;
2466  }
2467  }
2468 
2469  // This state - graph is not configured, but all inputs are either
2470  // initialized or EOF - should be unreachable because sending EOF to a
2471  // filter without even a fallback format should fail
2472  av_assert0(0);
2473  return AVERROR_BUG;
2474  }
2475 
2476  *best_ist = NULL;
2478  if (ret >= 0)
2479  return reap_filters(graph, 0);
2480 
2481  if (ret == AVERROR_EOF) {
2482  reap_filters(graph, 1);
2483  for (int i = 0; i < graph->nb_outputs; i++) {
2484  OutputFilter *ofilter = graph->outputs[i];
2485  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
2486 
2487  // we are finished and no frames were ever seen at this output,
2488  // at least initialize the encoder with a dummy frame
2489  if (!ofp->got_frame) {
2490  AVFrame *frame = fgp->frame;
2491  FrameData *fd;
2492 
2493  frame->time_base = ofp->tb_out;
2494  frame->format = ofp->format;
2495 
2496  frame->width = ofp->width;
2497  frame->height = ofp->height;
2499 
2500  frame->sample_rate = ofp->sample_rate;
2501  if (ofp->ch_layout.nb_channels) {
2503  if (ret < 0)
2504  return ret;
2505  }
2506 
2507  fd = frame_data(frame);
2508  if (!fd)
2509  return AVERROR(ENOMEM);
2510 
2511  fd->frame_rate_filter = ofp->fps.framerate;
2512 
2513  av_assert0(!frame->buf[0]);
2514 
2515  av_log(ofilter->ost, AV_LOG_WARNING,
2516  "No filtered frames for output stream, trying to "
2517  "initialize anyway.\n");
2518 
2519  enc_open(ofilter->ost, frame);
2521  }
2522 
2523  close_output_stream(ofilter->ost);
2524  }
2525  return 0;
2526  }
2527  if (ret != AVERROR(EAGAIN))
2528  return ret;
2529 
2530  for (i = 0; i < graph->nb_inputs; i++) {
2531  InputFilter *ifilter = graph->inputs[i];
2532  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2533 
2534  ist = ifp->ist;
2535  if (input_files[ist->file_index]->eagain || ifp->eof)
2536  continue;
2537  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
2538  if (nb_requests > nb_requests_max) {
2539  nb_requests_max = nb_requests;
2540  *best_ist = ist;
2541  }
2542  }
2543 
2544  if (!*best_ist)
2545  for (i = 0; i < graph->nb_outputs; i++)
2546  graph->outputs[i]->ost->unavailable = 1;
2547 
2548  return 0;
2549 }
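When the graph has no output ready and is not at EOF, fg_transcode_step() above picks the next input to read by asking each buffer source how many frame requests it failed to satisfy and choosing the hungriest one. A minimal sketch of that selection over a plain array of buffer sources; choose_input is an illustrative name:

#include <libavfilter/buffersrc.h>

/* Sketch: return the index of the buffer source with the most failed
 * requests, i.e. the input the graph most urgently needs data from. */
static int choose_input(AVFilterContext **srcs, int nb_srcs)
{
    int best = -1;
    unsigned max_requests = 0;

    for (int i = 0; i < nb_srcs; i++) {
        unsigned n = av_buffersrc_get_nb_failed_requests(srcs[i]);
        if (n > max_requests) {   /* the hungriest input wins */
            max_requests = n;
            best = i;
        }
    }
    return best;                  /* -1: no input currently needs data */
}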
AVSubtitle
Definition: avcodec.h:2269
formats
formats
Definition: signature.h:48
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
Definition: ffmpeg_filter.c:625
init_complex_filtergraph
int init_complex_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1063
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
AVCodec
AVCodec.
Definition: codec.h:187
OutputFilter::last_pts
int64_t last_pts
Definition: ffmpeg.h:304
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:742
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:221
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:125
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:294
av_clip
#define av_clip
Definition: common.h:96
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:89
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:67
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:165
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:124
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2274
opt.h
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1049
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:59
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:890
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:103
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:932
InputStream::framerate_guessed
AVRational framerate_guessed
Definition: ffmpeg.h:348
FrameData
Definition: ffmpeg.h:636
close_output_stream
void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:487
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:129
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:515
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: aviobuf.c:1271
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:243
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1064
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:824
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:77
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:807
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:316
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:148
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1783
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:73
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:344
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:105
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
AVSubtitleRect
Definition: avcodec.h:2241
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2273
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost)
Definition: ffmpeg_filter.c:665
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:800
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:149
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:629
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1035
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
AVFrame::width
int width
Definition: frame.h:412
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:581
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:342
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:48
w
uint8_t w
Definition: llviddspenc.c:38
check_filter_outputs
int check_filter_outputs(void)
Definition: ffmpeg_filter.c:1349
AVOption
AVOption.
Definition: opt.h:251
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:157
FilterGraph::index
int index
Definition: ffmpeg.h:312
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:102
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:153
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:190
ifilter_parameters_from_dec
int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
Set up fallback filtering parameters from a decoder context.
Definition: ffmpeg_filter.c:1749
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
AV_NOWARN_DEPRECATED
#define AV_NOWARN_DEPRECATED(code)
Disable warnings about deprecated features This is useful for sections of code kept for backward comp...
Definition: attributes.h:126
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:317
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:649
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:198
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:312
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:997
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:317
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:590
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:370
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:368
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple)
Definition: ffmpeg_demux.c:870
FilterGraphPriv
Definition: ffmpeg_filter.c:44
OutputFilter::nb_frames_dup
uint64_t nb_frames_dup
Definition: ffmpeg.h:306
InputFilterPriv::sub2video
struct InputFilterPriv::@5 sub2video
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:740
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:313
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:624
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:324
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:87
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:81
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:138
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:165
enc_open
int enc_open(OutputStream *ost, const AVFrame *frame)
Definition: ffmpeg_enc.c:168
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1265
finish
static void finish(void)
Definition: movenc.c:342
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:231
OutputFilterPriv
Definition: ffmpeg_filter.c:162
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2107
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:138
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:256
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:66
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:275
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:205
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:562
val
static double val(void *priv, double ch)
Definition: aeval.c:78
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:802
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:459
pts
static int64_t pts
Definition: transcode_aac.c:643
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:487
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1594
ifilter_sub2video_heartbeat
void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2248
FrameData::tb
AVRational tb
Definition: ffmpeg.h:642
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:64
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:170
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:154
AVRational::num
int num
Numerator.
Definition: rational.h:59
InputFile
Definition: ffmpeg.h:399
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:47
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:917
OutputFile::shortest
int shortest
Definition: ffmpeg.h:631
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:75
avassert.h
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:645
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:74
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:774
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1211
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:68
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:61
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:82
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:318
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:353
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1508
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:2077
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:585
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:299
InputFilter
Definition: ffmpeg.h:287
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:293
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AVPacketSideData::data
uint8_t * data
Definition: packet.h:343
ctx
AVFormatContext * ctx
Definition: movenc.c:48
nb_streams
static int nb_streams
Definition: ffprobe.c:328
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:609
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2275
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:494
fg_transcode_step
int fg_transcode_step(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg_filter.c:2452
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:288
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:174
AVCodecParameters::nb_coded_side_data
int nb_coded_side_data
Amount of entries in coded_side_data.
Definition: codec_par.h:228
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:192
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:852
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
frame
static AVFrame * frame
Definition: demux_decode.c:54
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1772
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:185
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:169
AVFormatContext
Format I/O context.
Definition: avformat.h:1115
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:625
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:864
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:295
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1169
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:188
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:880
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:880
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:639
InputStream::sub2video::w
int w
Definition: ffmpeg.h:363
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:171
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1143
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:151
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:330
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:160
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:111
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1154
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1212
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:866
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:86
InputFilterPriv::ist
InputStream * ist
Definition: ffmpeg_filter.c:79
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:439
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:180
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:602
mathops.h
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1297
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1235
AVFilterGraph
Definition: avfilter.h:864
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:100
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:319
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:184
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:624
input_files
InputFile ** input_files
Definition: ffmpeg.c:123
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:623
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: avpacket.c:650
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:45
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:163
FilterGraph
Definition: ffmpeg.h:310
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1224
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:73
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1032
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:279
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:869
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1812
ifilter_sub2video
int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:2274
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:166
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:109
AVFifo
Definition: fifo.c:35
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:112
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:307
init_input_filter
static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:997
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:84
InputStream::file_index
int file_index
Definition: ffmpeg.h:327
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1029
output_files
OutputFile ** output_files
Definition: ffmpeg.c:126
FrameData::dec
struct FrameData::@3 dec
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:567
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:178
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:328
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:1919
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:314
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1080
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1074
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:133
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:78
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:518
OutputFilter::nb_frames_drop
uint64_t nb_frames_drop
Definition: ffmpeg.h:307
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1217
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:467
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:391
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:54
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:429
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2272
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:56
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1039
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:563
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:99
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:441
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1587
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:225
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: defs.h:61
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:301
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:942
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2224
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:368
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1611
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:904
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:972
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
InputFilterPriv::fallback
struct InputFilterPriv::@4 fallback
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:420
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:159
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:145
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:95
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
configure_input_filter
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1567
FPSConvContext
Definition: ffmpeg_filter.c:142
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
configure_output_filter
static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1334
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:647
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1818
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:50
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:176
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:66
src2
const pixel * src2
Definition: h264pred_template.c:422
display.h
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:157
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:407
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:649
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:69
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:622
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:94
tb
#define tb
Definition: regdef.h:68
AVFrame::pkt_duration
attribute_deprecated int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:700
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
ifilter_send_eof
int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2307
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:346
AVCodecContext::height
int height
Definition: avcodec.h:621
AVCodecParameters::coded_side_data
AVPacketSideData * coded_side_data
Additional data associated with the entire stream.
Definition: codec_par.h:223
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVFilter
Filter definition.
Definition: avfilter.h:166
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:1961
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:137
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:95
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:841
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
reap_filters
int reap_filters(FilterGraph *fg, int flush)
Get and encode new output from specified filtergraph, without causing activity.
Definition: ffmpeg_filter.c:2228
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:289
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:69
av_opt_eval_int
int av_opt_eval_int(void *obj, const AVOption *o, const char *val, int *int_out)
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:916
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:143
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:423
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1125
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:379
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1195
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:191
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:371
InputFile::eagain
int eagain
Definition: ffmpeg.h:409
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:265
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:447
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:937
AVFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
Definition: frame.h:752
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1378
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVFrame::height
int height
Definition: frame.h:412
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:847
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:342
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AVBufferSrcParameters::format
int format
video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format,...
Definition: buffersrc.h:78
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:573
outputs
static const AVFilterPad outputs[]
Definition: af_afwtdn.c:1291
AVRational::den
int den
Denominator.
Definition: rational.h:60
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1210
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:107
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:92
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:640
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:53
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:659
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:1842
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:186
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:225
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1236
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:311
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:647
OutputFilter
Definition: ffmpeg.h:292
OutputFilterPriv::got_frame
int got_frame
Definition: ffmpeg_filter.c:195
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1577
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:75
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:636
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:449
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc)
Definition: ffmpeg_filter.c:957
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:313
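This counter indicates how often the graph asked a buffersrc for data it did not have. A rough sketch of using it to pick the most starved input, assuming srcs[] holds the graph's buffersrc contexts; choose_starved_input is illustrative and not the actual ffmpeg scheduling logic.
#include <libavfilter/buffersrc.h>

static int choose_starved_input(AVFilterContext **srcs, int nb_srcs)
{
    int best = -1;
    unsigned best_failed = 0;

    for (int i = 0; i < nb_srcs; i++) {
        unsigned failed = av_buffersrc_get_nb_failed_requests(srcs[i]);
        if (failed > best_failed) {
            best_failed = failed;
            best        = i;
        }
    }
    return best; /* -1 if no input has outstanding failed requests */
}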
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:367
start_at_zero
int start_at_zero
Definition: ffmpeg_opt.c:79
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:90
InputStream::sub2video::h
int h
Definition: ffmpeg.h:363
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, int flush)
Definition: ffmpeg_filter.c:2149
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
OutputStream::is_cfr
int is_cfr
Definition: ffmpeg.h:546
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
OutputStream::autoscale
int autoscale
Definition: ffmpeg.h:554
InputStream::index
int index
Definition: ffmpeg.h:328
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:74
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for AV_NOPTS_VALUE.
Definition: mathematics.h:159
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:859
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:252
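A minimal sketch of serializing and walking an AVDictionary with av_dict_get_string() and av_dict_iterate() (the latter is listed further down in this index); the option names here are arbitrary examples and dict_demo is an illustrative name.
#include <stdio.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

static void dict_demo(void)
{
    AVDictionary *d = NULL;
    const AVDictionaryEntry *e = NULL;
    char *buf = NULL;

    av_dict_set(&d, "threads", "auto", 0);
    av_dict_set(&d, "preset", "fast", 0);

    if (av_dict_get_string(d, &buf, '=', ':') >= 0) {
        printf("%s\n", buf);          /* e.g. "threads=auto:preset=fast" */
        av_freep(&buf);               /* caller owns the serialized string */
    }

    while ((e = av_dict_iterate(d, e)))
        printf("%s -> %s\n", e->key, e->value);

    av_dict_free(&d);
}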
file_read
char * file_read(const char *filename)
Definition: ffmpeg_opt.c:750
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:97
InputFilterPriv::end_pts
int64_t end_pts
timestamp at which the current sub2video frame expires
Definition: ffmpeg_filter.c:130
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:130
d
d
Definition: ffmpeg_filter.c:368
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:621
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:271
imgutils.h
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
OutputStream::st
AVStream * st
Definition: mux.c:54
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each picture line.
Definition: frame.h:385
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:168
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1187
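A minimal sketch of sending a runtime command into a configured graph, assuming the graph contains a volume filter reachable under the target name "volume"; set_volume_cmd is an illustrative helper.
#include <libavfilter/avfilter.h>
#include <libavutil/log.h>

static int set_volume_cmd(AVFilterGraph *graph, const char *value)
{
    char res[128] = { 0 };
    int ret = avfilter_graph_send_command(graph, "volume", "volume", value,
                                          res, sizeof(res), 0);
    if (ret < 0)
        av_log(NULL, AV_LOG_WARNING, "command failed: %s\n", res);
    return ret;
}
A call such as set_volume_cmd(graph, "0.5") would halve the output volume while the graph keeps running.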
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:70
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset the pointer to NULL.
Definition: fifo.c:286
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: timestamp.h:54
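A minimal sketch of the av_ts2str()/av_ts2timestr() logging idiom this file relies on; both macros expand to a temporary buffer, so the result is only valid inside the enclosing expression. log_pts is an illustrative name.
#include <stdio.h>
#include <libavutil/timestamp.h>

static void log_pts(int64_t pts, AVRational tb)
{
    printf("pts:%s pts_time:%s tb:%d/%d\n",
           av_ts2str(pts), av_ts2timestr(pts, &tb), tb.num, tb.den);
}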
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
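A minimal sketch of the AVBPrint pattern (build, clear, pad, finalize) used when assembling filtergraph descriptions; the strings here are placeholders and bprint_demo is an illustrative name.
#include <stdio.h>
#include <libavutil/bprint.h>
#include <libavutil/mem.h>

static void bprint_demo(void)
{
    AVBPrint bp;
    char *str = NULL;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);

    av_bprintf(&bp, "in_%d", 3);
    av_bprint_clear(&bp);              /* back to "", allocation kept */
    av_bprint_chars(&bp, '-', 8);      /* "--------" */

    if (av_bprint_finalize(&bp, &str) >= 0) {
        printf("%s\n", str);
        av_freep(&str);
    }
}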
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:342
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:867
avstring.h
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds (== AV_TIME_BASE units)
Definition: ffmpeg.h:628
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:175
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1024
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:65
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:169
ifilter_send_frame
int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
Definition: ffmpeg_filter.c:2355
snprintf
#define snprintf
Definition: snprintf.h:34
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:156
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:866
buffersrc.h
enc_frame
int enc_frame(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg_enc.c:850
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of the given type.
Definition: ffmpeg_demux.c:135
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1367
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown); that is, the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:822
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
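A minimal sketch of the usual timestamp-rescaling call, combining AV_ROUND_NEAR_INF with AV_ROUND_PASS_MINMAX so that AV_NOPTS_VALUE passes through unchanged; rescale_pts is an illustrative wrapper.
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

static int64_t rescale_pts(int64_t pts, AVRational src_tb, AVRational dst_tb)
{
    return av_rescale_q_rnd(pts, src_tb, dst_tb,
                            AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}
For example, rescale_pts(pts, (AVRational){1, 90000}, (AVRational){1, 1000}) converts 90 kHz ticks to milliseconds.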
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2271
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1226
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
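A minimal sketch of the auto-growing AVFifo-of-pointers pattern behind the frame_queue field above, including the av_fifo_freep2() cleanup; queue_demo is an illustrative name.
#include <libavutil/error.h>
#include <libavutil/fifo.h>
#include <libavutil/frame.h>

static int queue_demo(void)
{
    AVFifo  *q     = av_fifo_alloc2(8, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
    AVFrame *frame = av_frame_alloc();
    AVFrame *out   = NULL;
    int ret;

    if (!q || !frame) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ret = av_fifo_write(q, &frame, 1);      /* store the pointer itself */
    if (ret < 0)
        goto end;
    frame = NULL;                           /* ownership moved into the queue */

    if (av_fifo_read(q, &out, 1) >= 0)
        av_frame_free(&out);
    ret = 0;
end:
    av_frame_free(&frame);
    av_fifo_freep2(&q);                     /* frees the FIFO, sets q to NULL */
    return ret;
}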
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:99
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:88
OutputFile
Definition: ffmpeg.h:615
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:358
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:158