#ifndef AVFILTER_DNN_DNN_BACKEND_COMMON_H
#define AVFILTER_DNN_DNN_BACKEND_COMMON_H

#include "../dnn_interface.h"
#define DNN_DEFINE_CLASS_EXT(name, desc, options) \
    {                                              \
        .class_name = desc,                        \
        .item_name  = av_default_item_name,        \
        .option     = options,                     \
        .version    = LIBAVUTIL_VERSION_INT,       \
        .category   = AV_CLASS_CATEGORY_FILTER,    \
    }

#define DNN_DEFINE_CLASS(fname) \
    DNN_DEFINE_CLASS_EXT(fname, #fname, fname##_options)
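As a hedged illustration of how these macros are meant to be used, a backend would typically define an option table and derive its AVClass via DNN_DEFINE_CLASS. The fake_backend context, option table, and field names below are hypothetical stand-ins, not taken from this header:

#include <stddef.h>
#include "libavutil/opt.h"

/* Hypothetical backend context; the leading AVClass pointer is
 * what makes the struct usable with the AVOption API. */
typedef struct FakeBackendContext {
    const AVClass *class;
    char *config_str;
} FakeBackendContext;

#define OFFSET(x) offsetof(FakeBackendContext, x)
static const AVOption fake_backend_options[] = {
    { "config", "hypothetical backend configuration string", OFFSET(config_str),
      AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_FILTERING_PARAM },
    { NULL }
};

/* DNN_DEFINE_CLASS(fake_backend) expands to a braced AVClass
 * initializer whose class_name is "fake_backend" and whose option
 * table is fake_backend_options. */
static const AVClass fake_backend_class = DNN_DEFINE_CLASS(fake_backend);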
#if HAVE_PTHREAD_CANCEL

/**
 * Common Async Execution Mechanism for the DNN Backends.
 */
typedef struct DNNAsyncExecModule {
    /**
     * Synchronous inference function for the backend
     * with corresponding request item as the argument.
     */
    int (*start_inference)(void *request);

    /**
     * Completion Callback for the backend.
     */
    void (*callback)(void *args);

    /**
     * Argument for the execution functions.
     */
    void *args;
} DNNAsyncExecModule;

typedef struct TaskItem {
    /* ... */
    const char **output_names;
    /* ... */
} TaskItem;

int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params);

/**
 * Fill the Task for Backend Execution.
 */
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc);

/**
 * Start asynchronous inference routine for the TensorFlow model
 * on a detached thread.
 */
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module);

/**
 * Extract input and output frame from the Task Queue after
 * asynchronous inference. Queue is a linear double-ended data
 * structure; AVFrame describes decoded (raw) audio or video data.
 */
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out);

/**
 * Allocate input and output frames and fill the Task with
 * execution parameters.
 */
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx);

/**
 * Join the Async Execution thread and set module pointers to NULL.
 */
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
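To make the async control flow concrete, here is a minimal sketch of how a backend might wire these pieces together. The FakeRequest type, its fields, and the inference body are invented for illustration; only the DNNAsyncExecModule members and the ff_dnn_* calls come from this header:

#include "dnn_backend_common.h"

/* Hypothetical per-request state; real backends define their own. */
typedef struct FakeRequest {
    TaskItem *task;                 /* task being executed */
    DNNAsyncExecModule exec_module; /* async execution state */
} FakeRequest;

/* Blocking inference body; ff_dnn_start_inference_async runs it on
 * a detached thread (or synchronously without pthread support). */
static int fake_start_inference(void *args)
{
    FakeRequest *request = args;
    /* ... run the model for request->task here ... */
    return 0;
}

/* Invoked once inference has finished. */
static void fake_completion_callback(void *args)
{
    FakeRequest *request = args;
    /* ... copy results out and mark request->task as done ... */
    (void)request;
}

static int fake_execute(void *ctx, FakeRequest *request,
                        DNNExecBaseParams *exec_params, void *backend_model)
{
    /* async = 1, do_ioproc = 1, per the ff_dnn_fill_task signature above. */
    int ret = ff_dnn_fill_task(request->task, exec_params, backend_model, 1, 1);
    if (ret != 0)
        return ret;

    request->exec_module.start_inference = fake_start_inference;
    request->exec_module.callback        = fake_completion_callback;
    request->exec_module.args            = request;

    return ff_dnn_start_inference_async(ctx, &request->exec_module);
}

A filter would later drain finished tasks with ff_dnn_get_result_common(task_queue, &in_frame, &out_frame), which yields the input/output AVFrame pair once inference has completed, and finally release the thread with ff_dnn_async_module_cleanup().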