FFmpeg
dnn_backend_tf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN tensorflow backend implementation.
24  */
25 
26 #include "libavformat/avio.h"
27 #include "libavutil/avassert.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 #include "libavcodec/defs.h"
33 #include "../internal.h"
34 #include "dnn_io_proc.h"
35 #include "dnn_backend_common.h"
36 #include "safe_queue.h"
37 #include <tensorflow/c/c_api.h>
38 
typedef struct TFOptions{
    /**
     * Hex string ("0x..." form) of a serialized TensorFlow ConfigProto,
     * decoded by hex_to_data() and handed to TF_SetConfig() in load_tf_model().
     */
    char *sess_config;
    uint8_t async;   ///< non-zero: run inference asynchronously (see dnn_execute_model_tf)
    uint32_t nireq;  ///< number of pre-allocated inference requests (see dnn_load_model_tf)
} TFOptions;
44 
typedef struct TFContext {
    const AVClass *class;  // must be first: used by AVOption/av_log machinery
    // NOTE(review): a "TFOptions options;" member appears to be missing from
    // this extraction -- the AVOption table uses OFFSET(options.sess_config)
    // and code below reads ctx->options.*. TODO: confirm against upstream.
} TFContext;
49 
typedef struct TFModel{
    // NOTE(review): members referenced elsewhere in this file (ctx, model,
    // request_queue, lltask_queue, task_queue) are missing from this
    // extraction -- TODO restore from upstream.
    TF_Graph *graph;      // graph imported from the serialized GraphDef file
    TF_Session *session;  // session used for every TF_SessionRun() call
    TF_Status *status;    // status object reused for graph/session setup calls
} TFModel;
60 
/**
 * Stores execution parameters for single
 * call to the TensorFlow C API
 */
typedef struct TFInferRequest {
    TF_Output *tf_outputs;       // graph output ports, one per requested output (task->nb_output)
    TF_Tensor **output_tensors;  // tensors filled by TF_SessionRun(), parallel to tf_outputs
    TF_Output *tf_input;         // single graph input port
    TF_Tensor *input_tensor;     // tensor fed as model input; freed by tf_free_request()
71 
typedef struct TFRequestItem {
    // NOTE(review): the infer_request, lltask and exec_module members used
    // throughout this file are missing from this extraction -- TODO restore.
    TF_Status *status;  // per-request status checked after each TF_SessionRun()
78 
79 #define OFFSET(x) offsetof(TFContext, x)
80 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
81 static const AVOption dnn_tensorflow_options[] = {
82  { "sess_config", "config for SessionOptions", OFFSET(options.sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
84  { NULL }
85 };
86 
87 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
88 
89 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
90 static void infer_completion_callback(void *args);
91 static inline void destroy_request_item(TFRequestItem **arg);
92 
/* Deallocator installed on the TF_Buffer created in read_graph();
 * releases the model bytes allocated with av_malloc(). */
static void free_buffer(void *data, size_t length)
{
    av_free(data);
}
97 
98 /**
99  * Free the contents of TensorFlow inference request.
100  * It does not free the TFInferRequest instance.
101  *
102  * @param request pointer to TFInferRequest instance.
103  * NULL pointer is allowed.
104  */
/**
 * Free the contents of TensorFlow inference request.
 * It does not free the TFInferRequest instance.
 *
 * @param request pointer to TFInferRequest instance.
 * NULL pointer is allowed.
 */
static void tf_free_request(TFInferRequest *request)
{
    if (!request)
        return;
    if (request->input_tensor) {
        TF_DeleteTensor(request->input_tensor);
        request->input_tensor = NULL;
    }
    av_freep(&request->tf_input);
    av_freep(&request->tf_outputs);
    if (request->output_tensors) {
        // NOTE(review): sizeof(ptr)/sizeof(ptr[0]) is always 1 here, so only
        // the first output tensor is ever deleted -- any additional output
        // tensors leak. The real element count (task->nb_output) would have
        // to be stored in the request to fix this. TODO: confirm upstream.
        int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
        for (uint32_t i = 0; i < nb_output; ++i) {
            if (request->output_tensors[i]) {
                TF_DeleteTensor(request->output_tensors[i]);
                request->output_tensors[i] = NULL;
            }
        }
        av_freep(&request->output_tensors);
    }
}
126 
127 /**
128  * Create a TensorFlow inference request. All properties
129  * are initially unallocated and set as NULL.
130  *
131  * @return pointer to the allocated TFInferRequest instance.
132  */
134 {
135  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
136  if (!infer_request) {
137  return NULL;
138  }
139  infer_request->tf_outputs = NULL;
140  infer_request->tf_input = NULL;
141  infer_request->input_tensor = NULL;
142  infer_request->output_tensors = NULL;
143  return infer_request;
144 }
145 
146 /**
147  * Start synchronous inference for the TensorFlow model.
148  *
149  * @param request pointer to the TFRequestItem for inference
150  * @retval 0 if execution is successful
151  * @retval AVERROR(EINVAL) if request is NULL
152  * @retval DNN_GENERIC_ERROR if execution fails
153  */
154 static int tf_start_inference(void *args)
155 {
156  TFRequestItem *request = args;
157  TFInferRequest *infer_request = request->infer_request;
158  LastLevelTaskItem *lltask = request->lltask;
159  TaskItem *task = lltask->task;
160  TFModel *tf_model = task->model;
161 
162  if (!request) {
163  av_log(&tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
164  return AVERROR(EINVAL);
165  }
166 
167  TF_SessionRun(tf_model->session, NULL,
168  infer_request->tf_input, &infer_request->input_tensor, 1,
169  infer_request->tf_outputs, infer_request->output_tensors,
170  task->nb_output, NULL, 0, NULL,
171  request->status);
172  if (TF_GetCode(request->status) != TF_OK) {
173  av_log(&tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
174  return DNN_GENERIC_ERROR;
175  }
176  return 0;
177 }
178 
179 /**
180  * Free the TFRequestItem completely.
181  *
182  * @param arg Address of the TFInferRequest instance.
183  */
/**
 * Free the TFRequestItem completely.
 *
 * @param arg Address of the TFInferRequest instance.
 * NULL is allowed; *arg is set to NULL on return.
 */
static inline void destroy_request_item(TFRequestItem **arg) {
    TFRequestItem *request;
    if (!arg) {
        return;
    }
    request = *arg;
    // Release tensors/ports first, then the request struct itself.
    tf_free_request(request->infer_request);
    av_freep(&request->infer_request);
    av_freep(&request->lltask);
    TF_DeleteStatus(request->status);
    // NOTE(review): the exec_module cleanup call (original line 194) appears
    // to be missing from this extraction -- TODO confirm against upstream.
    av_freep(arg);
}
197 
198 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
199 {
200  TFModel *tf_model = task->model;
201  TFContext *ctx = &tf_model->ctx;
202  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
203  if (!lltask) {
204  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
205  return AVERROR(ENOMEM);
206  }
207  task->inference_todo = 1;
208  task->inference_done = 0;
209  lltask->task = task;
210  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
211  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
212  av_freep(&lltask);
213  return AVERROR(ENOMEM);
214  }
215  return 0;
216 }
217 
218 static TF_Buffer *read_graph(const char *model_filename)
219 {
220  TF_Buffer *graph_buf;
221  unsigned char *graph_data = NULL;
222  AVIOContext *model_file_context;
223  long size, bytes_read;
224 
225  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
226  return NULL;
227  }
228 
229  size = avio_size(model_file_context);
230 
231  graph_data = av_malloc(size);
232  if (!graph_data){
233  avio_closep(&model_file_context);
234  return NULL;
235  }
236  bytes_read = avio_read(model_file_context, graph_data, size);
237  avio_closep(&model_file_context);
238  if (bytes_read != size){
239  av_freep(&graph_data);
240  return NULL;
241  }
242 
243  graph_buf = TF_NewBuffer();
244  graph_buf->data = graph_data;
245  graph_buf->length = size;
246  graph_buf->data_deallocator = free_buffer;
247 
248  return graph_buf;
249 }
250 
251 static TF_Tensor *allocate_input_tensor(const DNNData *input)
252 {
253  TF_DataType dt;
254  size_t size;
255  int64_t input_dims[4] = { 0 };
256 
257  input_dims[0] = 1;
258  input_dims[1] = input->dims[dnn_get_height_idx_by_layout(input->layout)];
259  input_dims[2] = input->dims[dnn_get_width_idx_by_layout(input->layout)];
260  input_dims[3] = input->dims[dnn_get_channel_idx_by_layout(input->layout)];
261  switch (input->dt) {
262  case DNN_FLOAT:
263  dt = TF_FLOAT;
264  size = sizeof(float);
265  break;
266  case DNN_UINT8:
267  dt = TF_UINT8;
268  size = 1;
269  break;
270  default:
271  av_assert0(!"should not reach here");
272  }
273 
274  return TF_AllocateTensor(dt, input_dims, 4,
275  input_dims[1] * input_dims[2] * input_dims[3] * size);
276 }
277 
/**
 * Query the model graph for the named input operation and fill the
 * DNNData description (data type, dims, layout) from it.
 *
 * @param model      TFModel pointer (passed as void* via DNNModel.get_input)
 * @param input      filled on success
 * @param input_name name of the input operation in the graph
 * @return 0 on success, AVERROR(EINVAL) for unknown name/type,
 *         DNN_GENERIC_ERROR if the tensor shape cannot be queried
 */
static int get_input_tf(void *model, DNNData *input, const char *input_name)
{
    TFModel *tf_model = model;
    TFContext *ctx = &tf_model->ctx;
    TF_Status *status;
    TF_DataType dt;
    int64_t dims[4];

    TF_Output tf_output;
    tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
    if (!tf_output.oper) {
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
        return AVERROR(EINVAL);
    }

    tf_output.index = 0;
    // Map the TF element type onto the DNN interface's type enum.
    dt = TF_OperationOutputType(tf_output);
    switch (dt) {
    case TF_FLOAT:
        input->dt = DNN_FLOAT;
        break;
    case TF_UINT8:
        input->dt = DNN_UINT8;
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Unsupported output type %d in model\n", dt);
        return AVERROR(EINVAL);
    }
    input->order = DCO_RGB;

    status = TF_NewStatus();
    TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
    if (TF_GetCode(status) != TF_OK){
        TF_DeleteStatus(status);
        av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
        return DNN_GENERIC_ERROR;
    }
    TF_DeleteStatus(status);

    // currently only NHWC is supported
    // batch dim must be 1 or dynamic (-1)
    av_assert0(dims[0] == 1 || dims[0] == -1);
    for (int i = 0; i < 4; i++)
        input->dims[i] = dims[i];
    input->layout = DL_NHWC;

    return 0;
}
325 
326 static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
327  const char *output_name, int *output_width, int *output_height)
328 {
329  int ret;
330  TFModel *tf_model = model;
331  TFContext *ctx = &tf_model->ctx;
332  TaskItem task;
333  TFRequestItem *request;
334  DNNExecBaseParams exec_params = {
335  .input_name = input_name,
336  .output_names = &output_name,
337  .nb_output = 1,
338  .in_frame = NULL,
339  .out_frame = NULL,
340  };
341 
342  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
343  if (ret != 0) {
344  goto err;
345  }
346 
347  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
348  if (ret != 0) {
349  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
350  goto err;
351  }
352 
353  request = ff_safe_queue_pop_front(tf_model->request_queue);
354  if (!request) {
355  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
356  ret = AVERROR(EINVAL);
357  goto err;
358  }
359 
360  ret = execute_model_tf(request, tf_model->lltask_queue);
361  *output_width = task.out_frame->width;
362  *output_height = task.out_frame->height;
363 
364 err:
365  av_frame_free(&task.out_frame);
366  av_frame_free(&task.in_frame);
367  return ret;
368 }
369 
#define SPACE_CHARS " \t\r\n"
/* Decode a hex-digit string into bytes. Whitespace (SPACE_CHARS) between
 * digits is skipped; decoding stops at the first character that is neither
 * whitespace nor a hex digit. When data is NULL only the decoded length is
 * computed. Returns the number of complete bytes decoded. */
static int hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int acc = 1;  /* nibble accumulator; bit 0x100 set once two nibbles are in */

    for (;;) {
        int c;

        p += strspn(p, SPACE_CHARS);
        if (!*p)
            break;
        c = av_toupper((unsigned char) *p++);
        if (c >= '0' && c <= '9')
            c -= '0';
        else if (c >= 'A' && c <= 'F')
            c = c - 'A' + 10;
        else
            break;
        acc = (acc << 4) | c;
        if (acc & 0x100) {
            if (data)
                data[len] = acc;   /* low 8 bits hold the decoded byte */
            len++;
            acc = 1;
        }
    }
    return len;
}
399 
/**
 * Load a frozen TensorFlow graph from model_filename into tf_model:
 * decode the optional hex sess_config option, import the GraphDef,
 * create the session, and run the optional "init" operation.
 *
 * @return 0 on success, AVERROR(EINVAL)/AVERROR(ENOMEM) on bad input or
 *         allocation failure, DNN_GENERIC_ERROR on TF API failures
 */
static int load_tf_model(TFModel *tf_model, const char *model_filename)
{
    TFContext *ctx = &tf_model->ctx;
    TF_Buffer *graph_def;
    TF_ImportGraphDefOptions *graph_opts;
    TF_SessionOptions *sess_opts;
    const TF_Operation *init_op;
    uint8_t *sess_config = NULL;
    int sess_config_length = 0;

    // prepare the sess config data
    if (tf_model->ctx.options.sess_config != NULL) {
        const char *config;
        /*
        tf_model->ctx.options.sess_config is hex to present the serialized proto
        required by TF_SetConfig below, so we need to first generate the serialized
        proto in a python script, tools/python/tf_sess_config.py is a script example
        to generate the configs of sess_config.
        */
        if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
            av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
            return AVERROR(EINVAL);
        }
        config = tf_model->ctx.options.sess_config + 2;
        // first pass: length only (data == NULL), then decode into the buffer
        sess_config_length = hex_to_data(NULL, config);

        sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!sess_config) {
            av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
            return AVERROR(ENOMEM);
        }
        // NOTE(review): hex_to_data() never returns a negative value, so this
        // error branch is dead code -- TODO confirm intent.
        if (hex_to_data(sess_config, config) < 0) {
            av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
            return AVERROR(EINVAL);
        }
    }

    graph_def = read_graph(model_filename);
    if (!graph_def){
        av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
        av_freep(&sess_config);
        return AVERROR(EINVAL);
    }
    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();
    graph_opts = TF_NewImportGraphDefOptions();
    TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
    TF_DeleteImportGraphDefOptions(graph_opts);
    TF_DeleteBuffer(graph_def);
    if (TF_GetCode(tf_model->status) != TF_OK){
        av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
        av_freep(&sess_config);
        return DNN_GENERIC_ERROR;
    }

    // "init" may legitimately be absent; only run it if present (below)
    init_op = TF_GraphOperationByName(tf_model->graph, "init");
    sess_opts = TF_NewSessionOptions();

    if (sess_config) {
        TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->status);
        av_freep(&sess_config);
        if (TF_GetCode(tf_model->status) != TF_OK) {
            TF_DeleteSessionOptions(sess_opts);
            av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
                                      tf_model->ctx.options.sess_config);
            return DNN_GENERIC_ERROR;
        }
    }

    tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
    TF_DeleteSessionOptions(sess_opts);
    if (TF_GetCode(tf_model->status) != TF_OK)
    {
        av_freep(&sess_config);
        av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
        return DNN_GENERIC_ERROR;
    }

    // Run initialization operation with name "init" if it is present in graph
    if (init_op){
        TF_SessionRun(tf_model->session, NULL,
                      NULL, NULL, 0,
                      NULL, NULL, 0,
                      &init_op, 1, NULL, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK)
        {
            av_freep(&sess_config);
            av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
            return DNN_GENERIC_ERROR;
        }
    }

    return 0;
}
494 
/**
 * Free a TF backend model: drain and destroy all queues, then tear down
 * the TF session, graph and status. *model is set to NULL.
 */
static void dnn_free_model_tf(DNNModel **model)
{
    TFModel *tf_model;

    if (*model){
        tf_model = (*model)->model;
        while (ff_safe_queue_size(tf_model->request_queue) != 0) {
            // NOTE(review): the pop of the TFRequestItem into 'item' (and the
            // subsequent ff_safe_queue_destroy call) are missing from this
            // extraction -- TODO restore from upstream.
            destroy_request_item(&item);
        }

        while (ff_queue_size(tf_model->lltask_queue) != 0) {
            // NOTE(review): the pop of the LastLevelTaskItem into 'item' is
            // missing from this extraction -- TODO restore from upstream.
            av_freep(&item);
        }
        ff_queue_destroy(tf_model->lltask_queue);

        while (ff_queue_size(tf_model->task_queue) != 0) {
            TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
            av_frame_free(&item->in_frame);
            av_frame_free(&item->out_frame);
            av_freep(&item);
        }
        ff_queue_destroy(tf_model->task_queue);

        if (tf_model->graph){
            TF_DeleteGraph(tf_model->graph);
        }
        if (tf_model->session){
            TF_CloseSession(tf_model->session, tf_model->status);
            TF_DeleteSession(tf_model->session, tf_model->status);
        }
        if (tf_model->status){
            TF_DeleteStatus(tf_model->status);
        }
        av_freep(&tf_model);
        av_freep(model);
    }
}
535 
/**
 * Allocate a DNNModel/TFModel pair, parse backend options, load the
 * TensorFlow graph and pre-allocate nireq inference requests plus the
 * task/lltask queues. Returns NULL on any failure (everything allocated
 * so far is released via dnn_free_model_tf).
 */
static DNNModel *dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
{
    DNNModel *model = NULL;
    TFModel *tf_model = NULL;
    TFContext *ctx = NULL;

    model = av_mallocz(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    tf_model = av_mallocz(sizeof(TFModel));
    if (!tf_model){
        av_freep(&model);
        return NULL;
    }
    // link the two structs both ways so either can reach the other
    model->model = tf_model;
    tf_model->model = model;
    ctx = &tf_model->ctx;
    ctx->class = &dnn_tensorflow_class;

    //parse options
    // NOTE(review): an av_opt_set_defaults(ctx) call (original line 558)
    // appears to be missing from this extraction -- TODO confirm upstream.
    if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
        goto err;
    }

    if (load_tf_model(tf_model, model_filename) != 0){
        av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", model_filename);
        goto err;
    }

    // default request count: half the CPU count plus one
    if (ctx->options.nireq <= 0) {
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

#if !HAVE_PTHREAD_CANCEL
    if (ctx->options.async) {
        ctx->options.async = 0;
        av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
    }
#endif

    tf_model->request_queue = ff_safe_queue_create();
    if (!tf_model->request_queue) {
        goto err;
    }

    for (int i = 0; i < ctx->options.nireq; i++) {
        TFRequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            goto err;
        }
        item->lltask = NULL;
        // NOTE(review): the assignment of item->infer_request (via a
        // tf_create_infer_request() call, original line 591) is missing
        // from this extraction -- TODO restore from upstream.
        if (!item->infer_request) {
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
            av_freep(&item);
            goto err;
        }
        item->status = TF_NewStatus();
        // NOTE(review): initialization of item->exec_module.start_inference
        // and item->exec_module.callback (original lines 598-599) is missing
        // from this extraction -- TODO restore from upstream.
        item->exec_module.args = item;

        if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
            destroy_request_item(&item);
            goto err;
        }
    }

    tf_model->lltask_queue = ff_queue_create();
    if (!tf_model->lltask_queue) {
        goto err;
    }

    tf_model->task_queue = ff_queue_create();
    if (!tf_model->task_queue) {
        goto err;
    }

    model->get_input = &get_input_tf;
    model->get_output = &get_output_tf;
    model->options = options;
    model->filter_ctx = filter_ctx;
    model->func_type = func_type;

    return model;
err:
    dnn_free_model_tf(&model);
    return NULL;
}
629 
/**
 * Pop the next lltask, allocate the input tensor, copy the input frame
 * into it (via the model's pre-proc or the default io proc), and resolve
 * the graph input/output ports for the upcoming TF_SessionRun().
 *
 * On failure the partially filled infer_request is cleaned up via
 * tf_free_request().
 */
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
    DNNData input = { 0 };
    LastLevelTaskItem *lltask;
    TaskItem *task;
    TFInferRequest *infer_request = NULL;
    TFContext *ctx = &tf_model->ctx;
    int ret = 0;

    lltask = ff_queue_pop_front(tf_model->lltask_queue);
    av_assert0(lltask);  // caller guarantees the queue is non-empty
    task = lltask->task;
    request->lltask = lltask;

    ret = get_input_tf(tf_model, &input, task->input_name);
    if (ret != 0) {
        goto err;
    }

    infer_request = request->infer_request;
    // override model dims with the actual frame size (H at [1], W at [2]: NHWC)
    input.dims[1] = task->in_frame->height;
    input.dims[2] = task->in_frame->width;

    infer_request->tf_input = av_malloc(sizeof(TF_Output));
    if (!infer_request->tf_input) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
    if (!infer_request->tf_input->oper){
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
        // NOTE(review): the "ret = DNN_GENERIC_ERROR;" assignment (original
        // line 662) is missing from this extraction -- TODO restore.
        goto err;
    }
    infer_request->tf_input->index = 0;

    infer_request->input_tensor = allocate_input_tensor(&input);
    if (!infer_request->input_tensor){
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
    input.data = (float *)TF_TensorData(infer_request->input_tensor);

    switch (tf_model->model->func_type) {
    case DFT_PROCESS_FRAME:
        if (task->do_ioproc) {
            if (tf_model->model->frame_pre_proc != NULL) {
                tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
            } else {
                // NOTE(review): the default pre-proc call (original line 681)
                // is missing from this extraction -- TODO restore.
            }
        }
        break;
    // NOTE(review): a DFT_ANALYTICS_DETECT case (original lines 685-686)
    // is missing from this extraction -- TODO restore from upstream.
        break;
    default:
        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
        break;
    }

    infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
    if (infer_request->tf_outputs == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
    if (!infer_request->output_tensors) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    for (int i = 0; i < task->nb_output; ++i) {
        infer_request->output_tensors[i] = NULL;
        infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
        if (!infer_request->tf_outputs[i].oper) {
            av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
            // NOTE(review): the "ret = DNN_GENERIC_ERROR;" assignment
            // (original line 712) is missing from this extraction -- TODO restore.
            goto err;
        }
        infer_request->tf_outputs[i].index = 0;
    }

    return 0;
err:
    tf_free_request(infer_request);
    return ret;
}
723 
/**
 * Completion callback run after TF_SessionRun() finishes: convert the
 * output tensors into DNNData descriptors, run the model's post-proc
 * (or copy into the output frame), then recycle the request back into
 * request_queue.
 */
static void infer_completion_callback(void *args) {
    TFRequestItem *request = args;
    LastLevelTaskItem *lltask = request->lltask;
    TaskItem *task = lltask->task;
    DNNData *outputs;
    TFInferRequest *infer_request = request->infer_request;
    TFModel *tf_model = task->model;
    TFContext *ctx = &tf_model->ctx;

    outputs = av_calloc(task->nb_output, sizeof(*outputs));
    if (!outputs) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
        goto err;
    }

    for (uint32_t i = 0; i < task->nb_output; ++i) {
        // NOTE(review): the assignments of outputs[i].dims[...] from these
        // TF_Dim() calls (original lines 740/742/744) are missing from this
        // extraction -- TODO restore from upstream.
        TF_Dim(infer_request->output_tensors[i], 1);
        TF_Dim(infer_request->output_tensors[i], 2);
        TF_Dim(infer_request->output_tensors[i], 3);
        outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
        outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
    }
    switch (tf_model->model->func_type) {
    case DFT_PROCESS_FRAME:
        //it only support 1 output if it's frame in & frame out
        if (task->do_ioproc) {
            if (tf_model->model->frame_post_proc != NULL) {
                tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
            } else {
                // NOTE(review): the default post-proc call (original line 756)
                // is missing from this extraction -- TODO restore.
            }
        } else {
            // NOTE(review): the width/height values assigned here (original
            // lines 760/762) are missing from this extraction -- TODO restore.
            task->out_frame->width =
            task->out_frame->height =
        }
        break;
    // NOTE(review): the "case DFT_ANALYTICS_DETECT:" label (original line
    // 765) is missing from this extraction -- TODO restore from upstream.
        if (!tf_model->model->detect_post_proc) {
            av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
            return;
        }
        tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
        goto err;
    }
    task->inference_done++;
err:
    tf_free_request(infer_request);
    av_freep(&outputs);

    // recycle the request for the next inference
    if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
        destroy_request_item(&request);
        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
    }
}
786 
787 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
788 {
789  TFModel *tf_model;
790  TFContext *ctx;
791  LastLevelTaskItem *lltask;
792  TaskItem *task;
793  int ret = 0;
794 
795  if (ff_queue_size(lltask_queue) == 0) {
796  destroy_request_item(&request);
797  return 0;
798  }
799 
800  lltask = ff_queue_peek_front(lltask_queue);
801  task = lltask->task;
802  tf_model = task->model;
803  ctx = &tf_model->ctx;
804 
805  ret = fill_model_input_tf(tf_model, request);
806  if (ret != 0) {
807  goto err;
808  }
809 
810  if (task->async) {
811  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
812  goto err;
813  }
814  return 0;
815  }
816  else {
817  ret = tf_start_inference(request);
818  if (ret != 0) {
819  goto err;
820  }
821  infer_completion_callback(request);
822  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
823  }
824 err:
825  tf_free_request(request->infer_request);
826  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
827  destroy_request_item(&request);
828  }
829  dnn_free_model_tf(&tf_model->model);
830  return ret;
831 }
832 
833 static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
834 {
835  TFModel *tf_model = model->model;
836  TFContext *ctx = &tf_model->ctx;
837  TaskItem *task;
838  TFRequestItem *request;
839  int ret = 0;
840 
841  ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
842  if (ret != 0) {
843  return ret;
844  }
845 
846  task = av_malloc(sizeof(*task));
847  if (!task) {
848  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
849  return AVERROR(ENOMEM);
850  }
851 
852  ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
853  if (ret != 0) {
854  av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
855  av_freep(&task);
856  return ret;
857  }
858 
859  if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
860  av_freep(&task);
861  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
862  return AVERROR(ENOMEM);
863  }
864 
865  ret = extract_lltask_from_task(task, tf_model->lltask_queue);
866  if (ret != 0) {
867  av_freep(&task);
868  av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
869  return ret;
870  }
871 
872  request = ff_safe_queue_pop_front(tf_model->request_queue);
873  if (!request) {
874  av_freep(&task);
875  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
876  return AVERROR(EINVAL);
877  }
878  return execute_model_tf(request, tf_model->lltask_queue);
879 }
880 
882 {
883  TFModel *tf_model = model->model;
884  return ff_dnn_get_result_common(tf_model->task_queue, in, out);
885 }
886 
/**
 * Flush any pending lltask by filling a request and starting it
 * asynchronously.
 *
 * @return 0 if there is nothing pending or the inference was started,
 *         a negative AVERROR / DNN_GENERIC_ERROR otherwise
 */
static int dnn_flush_tf(const DNNModel *model)
{
    TFModel *tf_model = model->model;
    TFContext *ctx = &tf_model->ctx;
    TFRequestItem *request;
    int ret;

    if (ff_queue_size(tf_model->lltask_queue) == 0) {
        // no pending task need to flush
        return 0;
    }

    request = ff_safe_queue_pop_front(tf_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return AVERROR(EINVAL);
    }

    ret = fill_model_input_tf(tf_model, request);
    if (ret != 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
        // recycle the request; destroy it only if recycling fails
        if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
            destroy_request_item(&request);
        }
        return ret;
    }

    // NOTE(review): this starts asynchronously regardless of the async
    // option; ff_dnn_start_inference_async presumably degrades to sync
    // when needed -- TODO confirm against dnn_backend_common.
    return ff_dnn_start_inference_async(ctx, &request->exec_module);
}
916 
919  .execute_model = dnn_execute_model_tf,
920  .get_result = dnn_get_result_tf,
921  .flush = dnn_flush_tf,
922  .free_model = dnn_free_model_tf,
923 };
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_tensorflow)
TFOptions::sess_config
char * sess_config
Definition: dnn_backend_tf.c:40
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TFInferRequest
Stores execution parameters for single call to the TensorFlow C API.
Definition: dnn_backend_tf.c:65
TFInferRequest::tf_outputs
TF_Output * tf_outputs
Definition: dnn_backend_tf.c:66
execute_model_tf
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
Definition: dnn_backend_tf.c:787
FLAGS
#define FLAGS
Definition: dnn_backend_tf.c:80
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1640
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
TFModel::graph
TF_Graph * graph
Definition: dnn_backend_tf.c:53
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:55
TFModel::ctx
TFContext ctx
Definition: dnn_backend_tf.c:51
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
Definition: dnn_backend_common.h:58
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
extract_lltask_from_task
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
Definition: dnn_backend_tf.c:198
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:30
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
LastLevelTaskItem
Definition: dnn_backend_common.h:50
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AVFrame::width
int width
Definition: frame.h:446
dnn_load_model_tf
static DNNModel * dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_tf.c:536
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1858
AVOption
AVOption.
Definition: opt.h:346
DNNModule::load_model
DNNModel *(* load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_interface.h:123
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
TFInferRequest::input_tensor
TF_Tensor * input_tensor
Definition: dnn_backend_tf.c:69
data
const char data[16]
Definition: mxf.c:148
avio_open
int avio_open(AVIOContext **s, const char *filename, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:497
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:77
load_tf_model
static int load_tf_model(TFModel *tf_model, const char *model_filename)
Definition: dnn_backend_tf.c:400
dnn_io_proc.h
TFModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_tf.c:56
TaskItem
Definition: dnn_backend_common.h:36
DNNAsyncExecModule::callback
void(* callback)(void *args)
Completion Callback for the backend.
Definition: dnn_backend_common.h:70
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OFFSET
#define OFFSET(x)
Definition: dnn_backend_tf.c:79
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
destroy_request_item
static void destroy_request_item(TFRequestItem **arg)
Free the TFRequestItem completely.
Definition: dnn_backend_tf.c:184
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:137
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:52
get_input_tf
static int get_input_tf(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_tf.c:278
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:62
SPACE_CHARS
#define SPACE_CHARS
Definition: dnn_backend_tf.c:370
Queue
Linear double-ended data structure.
Definition: queue.c:33
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
DNN_TF
@ DNN_TF
Definition: dnn_interface.h:35
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
fill_model_input_tf
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
Definition: dnn_backend_tf.c:630
TFRequestItem::exec_module
DNNAsyncExecModule exec_module
Definition: dnn_backend_tf.c:76
float
float
Definition: af_crystalizer.c:121
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
read_graph
static TF_Buffer * read_graph(const char *model_filename)
Definition: dnn_backend_tf.c:218
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:65
dnn_tensorflow_options
static const AVOption dnn_tensorflow_options[]
Definition: dnn_backend_tf.c:81
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:156
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
arg
const char * arg
Definition: jacosubdec.c:67
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
tf_create_inference_request
static TFInferRequest * tf_create_inference_request(void)
Create a TensorFlow inference request.
Definition: dnn_backend_tf.c:133
ff_dnn_async_module_cleanup
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
Definition: dnn_backend_common.c:86
TFModel::task_queue
Queue * task_queue
Definition: dnn_backend_tf.c:58
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_tf.c:724
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
TFModel::status
TF_Status * status
Definition: dnn_backend_tf.c:55
tf_free_request
static void tf_free_request(TFInferRequest *request)
Free the contents of TensorFlow inference request.
Definition: dnn_backend_tf.c:105
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:209
options
const OptionDef options[]
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
cpu.h
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
size
int size
Definition: twinvq_data.h:10344
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
dnn_flush_tf
static int dnn_flush_tf(const DNNModel *model)
Definition: dnn_backend_tf.c:887
TFOptions::nireq
uint32_t nireq
Definition: dnn_backend_tf.c:42
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
hex_to_data
static int hex_to_data(uint8_t *data, const char *p)
Definition: dnn_backend_tf.c:371
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
tf_start_inference
static int tf_start_inference(void *args)
Start synchronous inference for the TensorFlow model.
Definition: dnn_backend_tf.c:154
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
ff_dnn_backend_tf
const DNNModule ff_dnn_backend_tf
Definition: dnn_backend_tf.c:917
dnn_execute_model_tf
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_tf.c:833
TFContext::options
TFOptions options
Definition: dnn_backend_tf.c:47
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
TFRequestItem::status
TF_Status * status
Definition: dnn_backend_tf.c:75
TFInferRequest::output_tensors
TF_Tensor ** output_tensors
Definition: dnn_backend_tf.c:67
TFModel::session
TF_Session * session
Definition: dnn_backend_tf.c:54
TFRequestItem::infer_request
TFInferRequest * infer_request
Definition: dnn_backend_tf.c:73
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
DNNAsyncExecModule::start_inference
int(* start_inference)(void *request)
Synchronous inference function for the backend with corresponding request item as the argument.
Definition: dnn_backend_common.h:63
DNNAsyncExecModule::args
void * args
Argument for the execution functions.
Definition: dnn_backend_common.h:76
av_toupper
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
Definition: avstring.h:227
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
TFOptions::async
uint8_t async
Definition: dnn_backend_tf.c:41
safe_queue.h
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
len
int len
Definition: vorbis_enc_data.h:426
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
TFInferRequest::tf_input
TF_Output * tf_input
Definition: dnn_backend_tf.c:68
TFContext
Definition: dnn_backend_tf.c:45
ret
ret
Definition: filter_design.txt:187
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TFModel::model
DNNModel * model
Definition: dnn_backend_tf.c:52
TFModel
Definition: dnn_backend_tf.c:50
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
dnn_get_result_tf
static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_tf.c:881
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
AVFrame::height
int height
Definition: frame.h:446
status
ov_status_e status
Definition: dnn_backend_openvino.c:121
allocate_input_tensor
static TF_Tensor * allocate_input_tensor(const DNNData *input)
Definition: dnn_backend_tf.c:251
dnn_backend_common.h
TFRequestItem::lltask
LastLevelTaskItem * lltask
Definition: dnn_backend_tf.c:74
defs.h
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DCO_RGB
@ DCO_RGB
Definition: dnn_interface.h:42
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
ff_dnn_start_inference_async
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start asynchronous inference routine for the TensorFlow model on a detached thread.
Definition: dnn_backend_common.c:105
DNNModel
Definition: dnn_interface.h:93
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:142
dnn_free_model_tf
static void dnn_free_model_tf(DNNModel **model)
Definition: dnn_backend_tf.c:495
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
DNNModel::options
const char * options
Definition: dnn_interface.h:97
dnn_get_channel_idx_by_layout
static int dnn_get_channel_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:147
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: avio.c:649
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
TFOptions
Definition: dnn_backend_tf.c:39
free_buffer
static void free_buffer(void *data, size_t length)
Definition: dnn_backend_tf.c:93
get_output_tf
static int get_output_tf(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_tf.c:326
DNNExecBaseParams
Definition: dnn_interface.h:76
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
TFRequestItem
Definition: dnn_backend_tf.c:72
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
TFModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_tf.c:57
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:44
DNNModule
Definition: dnn_interface.h:121
DNNModel::model
void * model
Definition: dnn_interface.h:95
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42