FFmpeg
dnn_backend_tf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN tensorflow backend implementation.
24  */
25 
26 #include "libavformat/avio.h"
27 #include "libavutil/avassert.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 #include "libavcodec/defs.h"
33 #include "../internal.h"
34 #include "dnn_io_proc.h"
35 #include "dnn_backend_common.h"
36 #include "safe_queue.h"
37 #include <tensorflow/c/c_api.h>
38 
39 typedef struct TFModel {
42  TF_Graph *graph;
43  TF_Session *session;
44  TF_Status *status;
48 } TFModel;
49 
50 /**
51  * Stores execution parameters for single
52  * call to the TensorFlow C API
53  */
54 typedef struct TFInferRequest {
55  TF_Output *tf_outputs;
56  TF_Tensor **output_tensors;
57  TF_Output *tf_input;
58  TF_Tensor *input_tensor;
60 
61 typedef struct TFRequestItem {
64  TF_Status *status;
67 
68 #define OFFSET(x) offsetof(TFOptions, x)
69 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
70 static const AVOption dnn_tensorflow_options[] = {
71  { "sess_config", "config for SessionOptions", OFFSET(sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
72  { NULL }
73 };
74 
75 
76 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
77 static void infer_completion_callback(void *args);
78 static inline void destroy_request_item(TFRequestItem **arg);
79 
80 static void free_buffer(void *data, size_t length)
81 {
82  av_freep(&data);
83 }
84 
85 /**
86  * Free the contents of TensorFlow inference request.
87  * It does not free the TFInferRequest instance.
88  *
89  * @param request pointer to TFInferRequest instance.
90  * NULL pointer is allowed.
91  */
92 static void tf_free_request(TFInferRequest *request)
93 {
94  if (!request)
95  return;
96  if (request->input_tensor) {
97  TF_DeleteTensor(request->input_tensor);
98  request->input_tensor = NULL;
99  }
100  av_freep(&request->tf_input);
101  av_freep(&request->tf_outputs);
102  if (request->output_tensors) {
103  int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
104  for (uint32_t i = 0; i < nb_output; ++i) {
105  if (request->output_tensors[i]) {
106  TF_DeleteTensor(request->output_tensors[i]);
107  request->output_tensors[i] = NULL;
108  }
109  }
110  av_freep(&request->output_tensors);
111  }
112 }
113 
114 /**
115  * Create a TensorFlow inference request. All properties
116  * are initially unallocated and set as NULL.
117  *
118  * @return pointer to the allocated TFInferRequest instance.
119  */
121 {
122  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
123  if (!infer_request) {
124  return NULL;
125  }
126  infer_request->tf_outputs = NULL;
127  infer_request->tf_input = NULL;
128  infer_request->input_tensor = NULL;
129  infer_request->output_tensors = NULL;
130  return infer_request;
131 }
132 
133 /**
134  * Start synchronous inference for the TensorFlow model.
135  *
136  * @param request pointer to the TFRequestItem for inference
137  * @retval 0 if execution is successful
138  * @retval AVERROR(EINVAL) if request is NULL
139  * @retval DNN_GENERIC_ERROR if execution fails
140  */
141 static int tf_start_inference(void *args)
142 {
143  TFRequestItem *request = args;
144  TFInferRequest *infer_request = request->infer_request;
145  LastLevelTaskItem *lltask = request->lltask;
146  TaskItem *task = lltask->task;
147  TFModel *tf_model = task->model;
148 
149  if (!request) {
150  av_log(tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
151  return AVERROR(EINVAL);
152  }
153 
154  TF_SessionRun(tf_model->session, NULL,
155  infer_request->tf_input, &infer_request->input_tensor, 1,
156  infer_request->tf_outputs, infer_request->output_tensors,
157  task->nb_output, NULL, 0, NULL,
158  request->status);
159  if (TF_GetCode(request->status) != TF_OK) {
160  av_log(tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
161  return DNN_GENERIC_ERROR;
162  }
163  return 0;
164 }
165 
166 /**
167  * Free the TFRequestItem completely.
168  *
169  * @param arg Address of the TFInferRequest instance.
170  */
171 static inline void destroy_request_item(TFRequestItem **arg) {
172  TFRequestItem *request;
173  if (!arg) {
174  return;
175  }
176  request = *arg;
177  tf_free_request(request->infer_request);
178  av_freep(&request->infer_request);
179  av_freep(&request->lltask);
180  TF_DeleteStatus(request->status);
182  av_freep(arg);
183 }
184 
185 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
186 {
187  TFModel *tf_model = task->model;
188  DnnContext *ctx = tf_model->ctx;
189  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
190  if (!lltask) {
191  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
192  return AVERROR(ENOMEM);
193  }
194  task->inference_todo = 1;
195  task->inference_done = 0;
196  lltask->task = task;
197  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
198  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
199  av_freep(&lltask);
200  return AVERROR(ENOMEM);
201  }
202  return 0;
203 }
204 
205 static TF_Buffer *read_graph(const char *model_filename)
206 {
207  TF_Buffer *graph_buf;
208  unsigned char *graph_data = NULL;
209  AVIOContext *model_file_context;
210  long size, bytes_read;
211 
212  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
213  return NULL;
214  }
215 
216  size = avio_size(model_file_context);
217 
218  graph_data = av_malloc(size);
219  if (!graph_data){
220  avio_closep(&model_file_context);
221  return NULL;
222  }
223  bytes_read = avio_read(model_file_context, graph_data, size);
224  avio_closep(&model_file_context);
225  if (bytes_read != size){
226  av_freep(&graph_data);
227  return NULL;
228  }
229 
230  graph_buf = TF_NewBuffer();
231  graph_buf->data = graph_data;
232  graph_buf->length = size;
233  graph_buf->data_deallocator = free_buffer;
234 
235  return graph_buf;
236 }
237 
238 static TF_Tensor *allocate_input_tensor(const DNNData *input)
239 {
240  TF_DataType dt;
241  size_t size;
242  int64_t input_dims[4] = { 0 };
243 
244  input_dims[0] = 1;
245  input_dims[1] = input->dims[dnn_get_height_idx_by_layout(input->layout)];
246  input_dims[2] = input->dims[dnn_get_width_idx_by_layout(input->layout)];
247  input_dims[3] = input->dims[dnn_get_channel_idx_by_layout(input->layout)];
248  switch (input->dt) {
249  case DNN_FLOAT:
250  dt = TF_FLOAT;
251  size = sizeof(float);
252  break;
253  case DNN_UINT8:
254  dt = TF_UINT8;
255  size = 1;
256  break;
257  default:
258  av_assert0(!"should not reach here");
259  }
260 
261  return TF_AllocateTensor(dt, input_dims, 4,
262  input_dims[1] * input_dims[2] * input_dims[3] * size);
263 }
264 
265 static int get_input_tf(DNNModel *model, DNNData *input, const char *input_name)
266 {
267  TFModel *tf_model = (TFModel *)model;
268  DnnContext *ctx = tf_model->ctx;
269  TF_Status *status;
270  TF_DataType dt;
271  int64_t dims[4];
272 
273  TF_Output tf_output;
274  tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
275  if (!tf_output.oper) {
276  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
277  return AVERROR(EINVAL);
278  }
279 
280  tf_output.index = 0;
281  dt = TF_OperationOutputType(tf_output);
282  switch (dt) {
283  case TF_FLOAT:
284  input->dt = DNN_FLOAT;
285  break;
286  case TF_UINT8:
287  input->dt = DNN_UINT8;
288  break;
289  default:
290  av_log(ctx, AV_LOG_ERROR, "Unsupported output type %d in model\n", dt);
291  return AVERROR(EINVAL);
292  }
293  input->order = DCO_RGB;
294 
295  status = TF_NewStatus();
296  TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
297  if (TF_GetCode(status) != TF_OK){
298  TF_DeleteStatus(status);
299  av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
300  return DNN_GENERIC_ERROR;
301  }
302  TF_DeleteStatus(status);
303 
304  // currently only NHWC is supported
305  av_assert0(dims[0] == 1 || dims[0] == -1);
306  for (int i = 0; i < 4; i++)
307  input->dims[i] = dims[i];
308  input->layout = DL_NHWC;
309 
310  return 0;
311 }
312 
313 static int get_output_tf(DNNModel *model, const char *input_name, int input_width, int input_height,
314  const char *output_name, int *output_width, int *output_height)
315 {
316  int ret;
317  TFModel *tf_model = (TFModel *)model;
318  DnnContext *ctx = tf_model->ctx;
319  TaskItem task;
320  TFRequestItem *request;
321  DNNExecBaseParams exec_params = {
322  .input_name = input_name,
323  .output_names = &output_name,
324  .nb_output = 1,
325  .in_frame = NULL,
326  .out_frame = NULL,
327  };
328 
329  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
330  if (ret != 0) {
331  goto err;
332  }
333 
334  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
335  if (ret != 0) {
336  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
337  goto err;
338  }
339 
340  request = ff_safe_queue_pop_front(tf_model->request_queue);
341  if (!request) {
342  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
343  ret = AVERROR(EINVAL);
344  goto err;
345  }
346 
347  ret = execute_model_tf(request, tf_model->lltask_queue);
348  *output_width = task.out_frame->width;
349  *output_height = task.out_frame->height;
350 
351 err:
352  av_frame_free(&task.out_frame);
353  av_frame_free(&task.in_frame);
354  return ret;
355 }
356 
#define SPACE_CHARS " \t\r\n"
/**
 * Decode a hex string into bytes, ignoring interleaved whitespace.
 * A trailing unpaired nibble and anything after the first non-hex
 * character are ignored.
 *
 * @param data destination buffer, or NULL to only count the bytes
 * @param p    NUL-terminated hex string
 * @return number of complete bytes decoded
 */
static int hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int v = 1; /* accumulator with sentinel bit: (v & 0x100) marks a full byte */

    for (;;) {
        int c;

        p += strspn(p, SPACE_CHARS); /* skip whitespace between digits */
        if (!*p)
            break;

        c = (unsigned char)*p++;
        if (c >= '0' && c <= '9')
            c -= '0';
        else if (c >= 'A' && c <= 'F')
            c = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            c = c - 'a' + 10;
        else
            break; /* first non-hex character terminates decoding */

        v = (v << 4) | c;
        if (v & 0x100) {
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}
386 
387 static int load_tf_model(TFModel *tf_model, const char *model_filename)
388 {
389  DnnContext *ctx = tf_model->ctx;
390  TF_Buffer *graph_def;
391  TF_ImportGraphDefOptions *graph_opts;
392  TF_SessionOptions *sess_opts;
393  const TF_Operation *init_op;
394  uint8_t *sess_config = NULL;
395  int sess_config_length = 0;
396 
397  // prepare the sess config data
398  if (ctx->tf_option.sess_config != NULL) {
399  const char *config;
400  /*
401  tf_model->ctx.options.sess_config is hex to present the serialized proto
402  required by TF_SetConfig below, so we need to first generate the serialized
403  proto in a python script, tools/python/tf_sess_config.py is a script example
404  to generate the configs of sess_config.
405  */
406  if (strncmp(ctx->tf_option.sess_config, "0x", 2) != 0) {
407  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
408  return AVERROR(EINVAL);
409  }
410  config = ctx->tf_option.sess_config + 2;
411  sess_config_length = hex_to_data(NULL, config);
412 
413  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
414  if (!sess_config) {
415  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
416  return AVERROR(ENOMEM);
417  }
418  if (hex_to_data(sess_config, config) < 0) {
419  av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
420  return AVERROR(EINVAL);
421  }
422  }
423 
424  graph_def = read_graph(model_filename);
425  if (!graph_def){
426  av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
427  av_freep(&sess_config);
428  return AVERROR(EINVAL);
429  }
430  tf_model->graph = TF_NewGraph();
431  tf_model->status = TF_NewStatus();
432  graph_opts = TF_NewImportGraphDefOptions();
433  TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
434  TF_DeleteImportGraphDefOptions(graph_opts);
435  TF_DeleteBuffer(graph_def);
436  if (TF_GetCode(tf_model->status) != TF_OK){
437  av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
438  av_freep(&sess_config);
439  return DNN_GENERIC_ERROR;
440  }
441 
442  init_op = TF_GraphOperationByName(tf_model->graph, "init");
443  sess_opts = TF_NewSessionOptions();
444 
445  if (sess_config) {
446  TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->status);
447  av_freep(&sess_config);
448  if (TF_GetCode(tf_model->status) != TF_OK) {
449  TF_DeleteSessionOptions(sess_opts);
450  av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
451  ctx->tf_option.sess_config);
452  return DNN_GENERIC_ERROR;
453  }
454  }
455 
456  tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
457  TF_DeleteSessionOptions(sess_opts);
458  if (TF_GetCode(tf_model->status) != TF_OK)
459  {
460  av_freep(&sess_config);
461  av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
462  return DNN_GENERIC_ERROR;
463  }
464 
465  // Run initialization operation with name "init" if it is present in graph
466  if (init_op){
467  TF_SessionRun(tf_model->session, NULL,
468  NULL, NULL, 0,
469  NULL, NULL, 0,
470  &init_op, 1, NULL, tf_model->status);
471  if (TF_GetCode(tf_model->status) != TF_OK)
472  {
473  av_freep(&sess_config);
474  av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
475  return DNN_GENERIC_ERROR;
476  }
477  }
478 
479  return 0;
480 }
481 
482 static void dnn_free_model_tf(DNNModel **model)
483 {
484  TFModel *tf_model;
485 
486  if (!model || !*model)
487  return;
488 
489  tf_model = (TFModel *)(*model);
490  while (ff_safe_queue_size(tf_model->request_queue) != 0) {
492  destroy_request_item(&item);
493  }
495 
496  while (ff_queue_size(tf_model->lltask_queue) != 0) {
498  av_freep(&item);
499  }
500  ff_queue_destroy(tf_model->lltask_queue);
501 
502  while (ff_queue_size(tf_model->task_queue) != 0) {
503  TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
504  av_frame_free(&item->in_frame);
505  av_frame_free(&item->out_frame);
506  av_freep(&item);
507  }
508  ff_queue_destroy(tf_model->task_queue);
509 
510  if (tf_model->graph){
511  TF_DeleteGraph(tf_model->graph);
512  }
513  if (tf_model->session){
514  TF_CloseSession(tf_model->session, tf_model->status);
515  TF_DeleteSession(tf_model->session, tf_model->status);
516  }
517  if (tf_model->status){
518  TF_DeleteStatus(tf_model->status);
519  }
520  av_freep(&tf_model);
521  *model = NULL;
522 }
523 
525 {
526  DNNModel *model = NULL;
527  TFModel *tf_model = NULL;
528 
529  tf_model = av_mallocz(sizeof(TFModel));
530  if (!tf_model)
531  return NULL;
532  model = &tf_model->model;
533  tf_model->ctx = ctx;
534 
535  if (load_tf_model(tf_model, ctx->model_filename) != 0){
536  av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", ctx->model_filename);
537  goto err;
538  }
539 
540  if (ctx->nireq <= 0) {
541  ctx->nireq = av_cpu_count() / 2 + 1;
542  }
543 
544 #if !HAVE_PTHREAD_CANCEL
545  if (ctx->options.async) {
546  ctx->options.async = 0;
547  av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
548  }
549 #endif
550 
551  tf_model->request_queue = ff_safe_queue_create();
552  if (!tf_model->request_queue) {
553  goto err;
554  }
555 
556  for (int i = 0; i < ctx->nireq; i++) {
557  TFRequestItem *item = av_mallocz(sizeof(*item));
558  if (!item) {
559  goto err;
560  }
561  item->lltask = NULL;
563  if (!item->infer_request) {
564  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
565  av_freep(&item);
566  goto err;
567  }
568  item->status = TF_NewStatus();
571  item->exec_module.args = item;
572 
573  if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
574  destroy_request_item(&item);
575  goto err;
576  }
577  }
578 
579  tf_model->lltask_queue = ff_queue_create();
580  if (!tf_model->lltask_queue) {
581  goto err;
582  }
583 
584  tf_model->task_queue = ff_queue_create();
585  if (!tf_model->task_queue) {
586  goto err;
587  }
588 
589  model->get_input = &get_input_tf;
590  model->get_output = &get_output_tf;
591  model->filter_ctx = filter_ctx;
592  model->func_type = func_type;
593 
594  return model;
595 err:
596  dnn_free_model_tf(&model);
597  return NULL;
598 }
599 
600 static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
601  DNNData input = { 0 };
602  LastLevelTaskItem *lltask;
603  TaskItem *task;
604  TFInferRequest *infer_request = NULL;
605  DnnContext *ctx = tf_model->ctx;
606  int ret = 0;
607 
608  lltask = ff_queue_pop_front(tf_model->lltask_queue);
609  av_assert0(lltask);
610  task = lltask->task;
611  request->lltask = lltask;
612 
613  ret = get_input_tf(&tf_model->model, &input, task->input_name);
614  if (ret != 0) {
615  goto err;
616  }
617 
618  infer_request = request->infer_request;
619  input.dims[1] = task->in_frame->height;
620  input.dims[2] = task->in_frame->width;
621 
622  infer_request->tf_input = av_malloc(sizeof(TF_Output));
623  if (!infer_request->tf_input) {
624  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
625  ret = AVERROR(ENOMEM);
626  goto err;
627  }
628 
629  infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
630  if (!infer_request->tf_input->oper){
631  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
633  goto err;
634  }
635  infer_request->tf_input->index = 0;
636 
637  infer_request->input_tensor = allocate_input_tensor(&input);
638  if (!infer_request->input_tensor){
639  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
640  ret = AVERROR(ENOMEM);
641  goto err;
642  }
643  input.data = (float *)TF_TensorData(infer_request->input_tensor);
644 
645  switch (tf_model->model.func_type) {
646  case DFT_PROCESS_FRAME:
647  if (task->do_ioproc) {
648  if (tf_model->model.frame_pre_proc != NULL) {
649  tf_model->model.frame_pre_proc(task->in_frame, &input, tf_model->model.filter_ctx);
650  } else {
652  }
653  }
654  break;
657  break;
658  default:
659  avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model.func_type);
660  break;
661  }
662 
663  infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
664  if (infer_request->tf_outputs == NULL) {
665  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
666  ret = AVERROR(ENOMEM);
667  goto err;
668  }
669 
670  infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
671  if (!infer_request->output_tensors) {
672  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
673  ret = AVERROR(ENOMEM);
674  goto err;
675  }
676 
677  for (int i = 0; i < task->nb_output; ++i) {
678  infer_request->output_tensors[i] = NULL;
679  infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
680  if (!infer_request->tf_outputs[i].oper) {
681  av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
683  goto err;
684  }
685  infer_request->tf_outputs[i].index = 0;
686  }
687 
688  return 0;
689 err:
690  tf_free_request(infer_request);
691  return ret;
692 }
693 
694 static void infer_completion_callback(void *args) {
695  TFRequestItem *request = args;
696  LastLevelTaskItem *lltask = request->lltask;
697  TaskItem *task = lltask->task;
698  DNNData *outputs;
699  TFInferRequest *infer_request = request->infer_request;
700  TFModel *tf_model = task->model;
701  DnnContext *ctx = tf_model->ctx;
702 
703  outputs = av_calloc(task->nb_output, sizeof(*outputs));
704  if (!outputs) {
705  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
706  goto err;
707  }
708 
709  for (uint32_t i = 0; i < task->nb_output; ++i) {
711  TF_Dim(infer_request->output_tensors[i], 1);
713  TF_Dim(infer_request->output_tensors[i], 2);
715  TF_Dim(infer_request->output_tensors[i], 3);
716  outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
717  outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
718  }
719  switch (tf_model->model.func_type) {
720  case DFT_PROCESS_FRAME:
721  //it only support 1 output if it's frame in & frame out
722  if (task->do_ioproc) {
723  if (tf_model->model.frame_post_proc != NULL) {
724  tf_model->model.frame_post_proc(task->out_frame, outputs, tf_model->model.filter_ctx);
725  } else {
727  }
728  } else {
729  task->out_frame->width =
731  task->out_frame->height =
733  }
734  break;
736  if (!tf_model->model.detect_post_proc) {
737  av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
738  return;
739  }
740  tf_model->model.detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model.filter_ctx);
741  break;
742  default:
743  av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
744  goto err;
745  }
746  task->inference_done++;
747 err:
748  tf_free_request(infer_request);
749  av_freep(&outputs);
750 
751  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
752  destroy_request_item(&request);
753  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
754  }
755 }
756 
757 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
758 {
759  TFModel *tf_model;
760  DnnContext *ctx;
761  LastLevelTaskItem *lltask;
762  TaskItem *task;
763  int ret = 0;
764 
765  if (ff_queue_size(lltask_queue) == 0) {
766  destroy_request_item(&request);
767  return 0;
768  }
769 
770  lltask = ff_queue_peek_front(lltask_queue);
771  task = lltask->task;
772  tf_model = task->model;
773  ctx = tf_model->ctx;
774 
775  ret = fill_model_input_tf(tf_model, request);
776  if (ret != 0) {
777  goto err;
778  }
779 
780  if (task->async) {
781  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
782  goto err;
783  }
784  return 0;
785  }
786  else {
787  ret = tf_start_inference(request);
788  if (ret != 0) {
789  goto err;
790  }
791  infer_completion_callback(request);
792  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
793  }
794 err:
795  tf_free_request(request->infer_request);
796  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
797  destroy_request_item(&request);
798  }
799 
800  return ret;
801 }
802 
803 static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
804 {
805  TFModel *tf_model = (TFModel *)model;
806  DnnContext *ctx = tf_model->ctx;
807  TaskItem *task;
808  TFRequestItem *request;
809  int ret = 0;
810 
811  ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
812  if (ret != 0) {
813  return ret;
814  }
815 
816  task = av_malloc(sizeof(*task));
817  if (!task) {
818  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
819  return AVERROR(ENOMEM);
820  }
821 
822  ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->async, 1);
823  if (ret != 0) {
824  av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
825  av_freep(&task);
826  return ret;
827  }
828 
829  if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
830  av_freep(&task);
831  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
832  return AVERROR(ENOMEM);
833  }
834 
835  ret = extract_lltask_from_task(task, tf_model->lltask_queue);
836  if (ret != 0) {
837  av_freep(&task);
838  av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
839  return ret;
840  }
841 
842  request = ff_safe_queue_pop_front(tf_model->request_queue);
843  if (!request) {
844  av_freep(&task);
845  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
846  return AVERROR(EINVAL);
847  }
848  return execute_model_tf(request, tf_model->lltask_queue);
849 }
850 
852 {
853  TFModel *tf_model = (TFModel *)model;
854  return ff_dnn_get_result_common(tf_model->task_queue, in, out);
855 }
856 
857 static int dnn_flush_tf(const DNNModel *model)
858 {
859  TFModel *tf_model = (TFModel *)model;
860  DnnContext *ctx = tf_model->ctx;
861  TFRequestItem *request;
862  int ret;
863 
864  if (ff_queue_size(tf_model->lltask_queue) == 0) {
865  // no pending task need to flush
866  return 0;
867  }
868 
869  request = ff_safe_queue_pop_front(tf_model->request_queue);
870  if (!request) {
871  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
872  return AVERROR(EINVAL);
873  }
874 
875  ret = fill_model_input_tf(tf_model, request);
876  if (ret != 0) {
877  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
878  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
879  destroy_request_item(&request);
880  }
881  return ret;
882  }
883 
884  return ff_dnn_start_inference_async(ctx, &request->exec_module);
885 }
886 
888  .clazz = DNN_DEFINE_CLASS(dnn_tensorflow),
889  .type = DNN_TF,
890  .load_model = dnn_load_model_tf,
891  .execute_model = dnn_execute_model_tf,
892  .get_result = dnn_get_result_tf,
893  .flush = dnn_flush_tf,
894  .free_model = dnn_free_model_tf,
895 };
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TFInferRequest
Stores execution parameters for single call to the TensorFlow C API.
Definition: dnn_backend_tf.c:54
TFInferRequest::tf_outputs
TF_Output * tf_outputs
Definition: dnn_backend_tf.c:55
execute_model_tf
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
Definition: dnn_backend_tf.c:757
FLAGS
#define FLAGS
Definition: dnn_backend_tf.c:69
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
TFModel::graph
TF_Graph * graph
Definition: dnn_backend_tf.c:42
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:55
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
Definition: dnn_backend_common.h:65
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:56
extract_lltask_from_task
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
Definition: dnn_backend_tf.c:185
int64_t
long long int64_t
Definition: coverity.c:34
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:30
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
LastLevelTaskItem
Definition: dnn_backend_common.h:57
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AVFrame::width
int width
Definition: frame.h:446
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:357
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
TFInferRequest::input_tensor
TF_Tensor * input_tensor
Definition: dnn_backend_tf.c:58
data
const char data[16]
Definition: mxf.c:148
avio_open
int avio_open(AVIOContext **s, const char *filename, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:497
TFModel::model
DNNModel model
Definition: dnn_backend_tf.c:40
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:81
load_tf_model
static int load_tf_model(TFModel *tf_model, const char *model_filename)
Definition: dnn_backend_tf.c:387
dnn_io_proc.h
TFModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_tf.c:45
TaskItem
Definition: dnn_backend_common.h:43
DNNAsyncExecModule::callback
void(* callback)(void *args)
Completion Callback for the backend.
Definition: dnn_backend_common.h:77
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OFFSET
#define OFFSET(x)
Definition: dnn_backend_tf.c:68
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
destroy_request_item
static void destroy_request_item(TFRequestItem **arg)
Free the TFRequestItem completely.
Definition: dnn_backend_tf.c:171
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:197
TaskItem::model
void * model
Definition: dnn_backend_common.h:44
DnnContext
Definition: dnn_interface.h:143
get_input_tf
static int get_input_tf(DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_backend_tf.c:265
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:52
dnn_load_model_tf
static DNNModel * dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
Definition: dnn_backend_tf.c:524
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:66
SPACE_CHARS
#define SPACE_CHARS
Definition: dnn_backend_tf.c:357
Queue
Linear double-ended data structure.
Definition: queue.c:33
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_TF
@ DNN_TF
Definition: dnn_interface.h:36
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
fill_model_input_tf
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
Definition: dnn_backend_tf.c:600
TFRequestItem::exec_module
DNNAsyncExecModule exec_module
Definition: dnn_backend_tf.c:65
float
float
Definition: af_crystalizer.c:121
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:58
TFModel::ctx
DnnContext * ctx
Definition: dnn_backend_tf.c:41
read_graph
static TF_Buffer * read_graph(const char *model_filename)
Definition: dnn_backend_tf.c:205
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:69
DNNModule::clazz
const AVClass clazz
Definition: dnn_interface.h:176
dnn_tensorflow_options
static const AVOption dnn_tensorflow_options[]
Definition: dnn_backend_tf.c:70
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:156
DNNModel::get_output
int(* get_output)(struct DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:52
arg
const char * arg
Definition: jacosubdec.c:67
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
tf_create_inference_request
static TFInferRequest * tf_create_inference_request(void)
Create a TensorFlow inference request.
Definition: dnn_backend_tf.c:120
ff_dnn_async_module_cleanup
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
Definition: dnn_backend_common.c:86
TFModel::task_queue
Queue * task_queue
Definition: dnn_backend_tf.c:47
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_tf.c:694
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:45
TFModel::status
TF_Status * status
Definition: dnn_backend_tf.c:44
tf_free_request
static void tf_free_request(TFInferRequest *request)
Free the contents of TensorFlow inference request.
Definition: dnn_backend_tf.c:92
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:211
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:53
cpu.h
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
size
int size
Definition: twinvq_data.h:10344
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
dnn_flush_tf
static int dnn_flush_tf(const DNNModel *model)
Definition: dnn_backend_tf.c:857
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:41
hex_to_data
static int hex_to_data(uint8_t *data, const char *p)
Definition: dnn_backend_tf.c:358
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:41
get_output_tf
static int get_output_tf(DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_tf.c:313
tf_start_inference
static int tf_start_inference(void *args)
Start synchronous inference for the TensorFlow model.
Definition: dnn_backend_tf.c:141
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
DNN_DEFINE_CLASS
#define DNN_DEFINE_CLASS(fname)
Definition: dnn_backend_common.h:39
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
ff_dnn_backend_tf
const DNNModule ff_dnn_backend_tf
Definition: dnn_backend_tf.c:887
dnn_execute_model_tf
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_tf.c:803
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:59
TFRequestItem::status
TF_Status * status
Definition: dnn_backend_tf.c:64
TFInferRequest::output_tensors
TF_Tensor ** output_tensors
Definition: dnn_backend_tf.c:56
TFModel::session
TF_Session * session
Definition: dnn_backend_tf.c:43
TFRequestItem::infer_request
TFInferRequest * infer_request
Definition: dnn_backend_tf.c:62
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
DNNAsyncExecModule::start_inference
int(* start_inference)(void *request)
Synchronous inference function for the backend with corresponding request item as the argument.
Definition: dnn_backend_common.h:70
DNNAsyncExecModule::args
void * args
Argument for the execution functions.
Definition: dnn_backend_common.h:83
av_toupper
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
Definition: avstring.h:227
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
safe_queue.h
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:48
len
int len
Definition: vorbis_enc_data.h:426
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
TFInferRequest::tf_input
TF_Output * tf_input
Definition: dnn_backend_tf.c:57
ret
ret
Definition: filter_design.txt:187
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:41
TFModel
Definition: dnn_backend_tf.c:39
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
dnn_get_result_tf
static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_tf.c:851
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:46
AVFrame::height
int height
Definition: frame.h:446
status
ov_status_e status
Definition: dnn_backend_openvino.c:101
allocate_input_tensor
static TF_Tensor * allocate_input_tensor(const DNNData *input)
Definition: dnn_backend_tf.c:238
dnn_backend_common.h
TFRequestItem::lltask
LastLevelTaskItem * lltask
Definition: dnn_backend_tf.c:63
defs.h
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DCO_RGB
@ DCO_RGB
Definition: dnn_interface.h:46
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
ff_dnn_start_inference_async
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start asynchronous inference routine for the TensorFlow model on a detached thread.
Definition: dnn_backend_common.c:105
DNNModel
Definition: dnn_interface.h:97
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:202
dnn_free_model_tf
static void dnn_free_model_tf(DNNModel **model)
Definition: dnn_backend_tf.c:482
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:47
dnn_get_channel_idx_by_layout
static int dnn_get_channel_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:207
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: avio.c:649
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
free_buffer
static void free_buffer(void *data, size_t length)
Definition: dnn_backend_tf.c:80
DNNExecBaseParams
Definition: dnn_interface.h:80
DNNModel::get_input
int(* get_input)(struct DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:50
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:249
TFRequestItem
Definition: dnn_backend_tf.c:61
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:49
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:58
TFModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_tf.c:46
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:51
DNNModule
Definition: dnn_interface.h:175
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42