[FFmpeg-devel] [PATCH] lavfi/dnn_backend_openvino.c: Fix Memory Leak in execute_model_ov
Shubhanshu Saxena
shubhanshu.e01 at gmail.com
Fri Jun 18 19:23:08 EEST 2021
If execution inside the function execute_model_ov fails,
push the RequestItem back to the request_queue before returning the error.
If pushing it back also fails, free the RequestItem's allocated memory to avoid a leak.
Signed-off-by: Shubhanshu Saxena <shubhanshu.e01 at gmail.com>
---
libavfilter/dnn/dnn_backend_openvino.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 702c4fb9ee..29ec8f6a8f 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -448,12 +448,12 @@ static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
- return DNN_ERROR;
+ goto err;
}
status = ie_infer_request_infer_async(request->infer_request);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
- return DNN_ERROR;
+ goto err;
}
return DNN_SUCCESS;
} else {
@@ -464,11 +464,17 @@ static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
status = ie_infer_request_infer(request->infer_request);
if (status != OK) {
av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
- return DNN_ERROR;
+ goto err;
}
infer_completion_callback(request);
return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
}
+err:
+ if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
+ ie_infer_request_free(&request->infer_request);
+ av_freep(&request);
+ }
+ return DNN_ERROR;
}
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
--
2.25.1
More information about the ffmpeg-devel
mailing list