Source excerpts and referenced symbols from FFmpeg's native DNN math-binary layer (dnn_backend_native_layer_mathbinary.c).
/* From math_binary_commutative(): when either input is a broadcast scalar,
 * the scalar can be passed as the first argument because the operation is
 * commutative; otherwise the second operand is fetched and the two arrays
 * are combined elementwise. */
if (params->input0_broadcast || params->input1_broadcast) {
    for (int i = 0; i < dims_count; ++i) {
        dst[i] = pfun(params->v, src[i]);
    }
} else {
    const DnnOperand *input1 = &operands[input_operand_indexes[1]];
    const float *src1 = input1->data;
    for (int i = 0; i < dims_count; ++i) {
        dst[i] = pfun(src[i], src1[i]);
    }
}
/* From math_binary_not_commutative(): argument order matters, so the
 * broadcast scalar is applied on whichever side it replaces. */
if (params->input0_broadcast) {
    for (int i = 0; i < dims_count; ++i) {
        dst[i] = pfun(params->v, src[i]);
    }
} else if (params->input1_broadcast) {
    for (int i = 0; i < dims_count; ++i) {
        dst[i] = pfun(src[i], params->v);
    }
} else {
    const DnnOperand *input1 = &operands[input_operand_indexes[1]];
    const float *src1 = input1->data;
    for (int i = 0; i < dims_count; ++i) {
        dst[i] = pfun(src[i], src1[i]);
    }
}
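The two helpers above differ only in how a broadcast scalar interacts with argument order: a commutative operation can always take the scalar as its first argument, while a non-commutative one such as sub needs separate pfun(params->v, src[i]) and pfun(src[i], params->v) loops. The following standalone sketch illustrates that distinction; it uses plain float arrays rather than the DnnOperand layout, and the helper names are invented for the example.

#include <stdio.h>

typedef float (*FunType)(float src0, float src1);

static float add(float src0, float src1) { return src0 + src1; }
static float sub(float src0, float src1) { return src0 - src1; }

/* Apply a binary op against a broadcast scalar; scalar_first selects which
 * side the scalar sits on, which only matters for non-commutative ops. */
static void apply_broadcast(FunType pfun, float scalar, int scalar_first,
                            const float *src, float *dst, int n)
{
    for (int i = 0; i < n; ++i)
        dst[i] = scalar_first ? pfun(scalar, src[i]) : pfun(src[i], scalar);
}

int main(void)
{
    const float src[3] = { 1.0f, 2.0f, 3.0f };
    float dst[3];

    /* add is commutative: the side the scalar is on does not change the result */
    apply_broadcast(add, 10.0f, 1, src, dst, 3);
    printf("10 + x: %g %g %g\n", dst[0], dst[1], dst[2]);  /* 11 12 13 */

    /* sub is not: 10 - x and x - 10 differ, hence the two loops in the file */
    apply_broadcast(sub, 10.0f, 1, src, dst, 3);
    printf("10 - x: %g %g %g\n", dst[0], dst[1], dst[2]);  /* 9 8 7 */
    apply_broadcast(sub, 10.0f, 0, src, dst, 3);
    printf("x - 10: %g %g %g\n", dst[0], dst[1], dst[2]);  /* -9 -8 -7 */
    return 0;
}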
/* From ff_dnn_execute_layer_math_binary(): the output takes the input's
 * shape, and its byte length is validated before the buffer is (re)used. */
for (int i = 0; i < 4; ++i)
    output->dims[i] = input->dims[i];

output->length = ff_calculate_operand_data_length(output);
if (output->length <= 0) {
    /* length overflow: report via av_log(ctx, AV_LOG_ERROR, ...) and return an error */
}
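Around the length check above, the usual pattern in these native layers is to recompute the output byte length, fail loudly if it is non-positive, and reallocate the output buffer. Below is a minimal sketch of that pattern using only the av_realloc/av_log/AVERROR APIs listed further down; the helper name and message text are illustrative, not copied from the file.

#include <errno.h>
#include <stdint.h>
#include <libavutil/mem.h>   /* av_realloc */
#include <libavutil/log.h>   /* av_log, AV_LOG_ERROR */
#include <libavutil/error.h> /* AVERROR */

/* Illustrative only: resize an output buffer after its length has been
 * recomputed, logging and bailing out if the length is invalid.
 * log_ctx must be an AVClass-enabled context (or NULL). */
static int resize_output(void *log_ctx, void **data, int32_t length)
{
    void *tmp;

    if (length <= 0) {
        av_log(log_ctx, AV_LOG_ERROR, "invalid output data length %d\n", length);
        return AVERROR(EINVAL);
    }

    tmp = av_realloc(*data, length);
    if (!tmp) {
        av_log(log_ctx, AV_LOG_ERROR, "failed to reallocate output buffer\n");
        return AVERROR(ENOMEM);
    }
    *data = tmp;
    return 0;
}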
static float sub(float src0, float src1)
static float floormod(float src0, float src1)
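Of the elementwise ops listed here, floormod is the one whose semantics are least obvious from the name. The sketch below shows floor-modulo behaviour (remainder with the quotient rounded toward negative infinity, so the result takes the divisor's sign); it is an illustration of the operation, not a copy of the file's implementation.

#include <math.h>
#include <stdio.h>

/* Floor modulo: remainder of src0 / src1 where the quotient is rounded
 * toward negative infinity, so the result has the sign of src1. */
static float floormod_example(float src0, float src1)
{
    return src0 - floorf(src0 / src1) * src1;
}

int main(void)
{
    printf("%g\n", floormod_example( 7.0f,  3.0f)); /* 1  */
    printf("%g\n", floormod_example(-7.0f,  3.0f)); /* 2  */
    printf("%g\n", floormod_example( 7.0f, -3.0f)); /* -2 */
    return 0;
}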
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
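av_memdup produces a heap copy of an arbitrary block, which is how layer loaders typically keep the params struct they parse off the stack. A small sketch, with a placeholder params type invented for the example:

#include <stdint.h>
#include <libavutil/mem.h>   /* av_memdup */

typedef struct ExampleParams {
    int32_t bin_op;
    float   v;
} ExampleParams;

static void *dup_params_example(void)
{
    ExampleParams params = { .bin_op = 0, .v = 1.5f };

    /* Returns a newly av_malloc()'ed copy of the stack object,
     * or NULL on allocation failure. */
    return av_memdup(&params, sizeof(params));
}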
static float realdiv(float src0, float src1)
static av_always_inline float av_int2float(uint32_t i)
Reinterpret a 32-bit integer as a float.
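av_int2float pairs with avio_rl32 when a float has been serialized as a raw little-endian 32-bit word, which appears to be how the broadcast value v is stored in the native model format. A standalone illustration of the bit reinterpretation:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/intfloat.h>  /* av_int2float */

int main(void)
{
    /* 0x3f800000 is the IEEE-754 bit pattern of 1.0f; av_int2float
     * reinterprets the bits rather than performing a numeric conversion. */
    uint32_t bits = 0x3f800000u;
    float v = av_int2float(bits);

    printf("%g\n", v); /* prints 1 */
    return 0;
}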
int32_t ff_calculate_operand_data_length(const DnnOperand *oprd)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * data
data pointer with data length in bytes.
static void math_binary_not_commutative(FunType pfun, const DnnLayerMathBinaryParams *params, const DnnOperand *input, DnnOperand *output, DnnOperand *operands, const int32_t *input_operand_indexes)
static float mul(float src0, float src1)
unsigned int avio_rl32(AVIOContext *s)
static float minimum(float src0, float src1)
int32_t output_operand_index
int32_t input_operand_indexes[4]
A layer can have multiple inputs and one output.
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.
int ff_dnn_load_layer_math_binary(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num)
static void math_binary_commutative(FunType pfun, const DnnLayerMathBinaryParams *params, const DnnOperand *input, DnnOperand *output, DnnOperand *operands, const int32_t *input_operand_indexes)
int32_t ff_calculate_operand_dims_count(const DnnOperand *oprd)
int ff_dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const void *parameters, NativeContext *ctx)
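ff_dnn_execute_layer_math_binary selects the elementwise function from the layer's bin_op field and routes it to the commutative or non-commutative helper accordingly. The sketch below shows that selection step with invented op-code names; the real DNNMathBinaryOperation enum lives in the layer's header and is not reproduced here.

#include <stddef.h>
#include <stdio.h>

typedef float (*FunType)(float src0, float src1);

static float add(float a, float b)     { return a + b; }
static float sub(float a, float b)     { return a - b; }
static float mul(float a, float b)     { return a * b; }
static float realdiv(float a, float b) { return a / b; }

/* Placeholder op codes; the actual enum constants are defined elsewhere. */
typedef enum { OP_SUB, OP_ADD, OP_MUL, OP_REALDIV } ExampleBinOp;

/* Map an op code to the function to apply and report whether it is
 * commutative, i.e. whether a broadcast scalar may sit on either side. */
static FunType select_bin_op(ExampleBinOp op, int *is_commutative)
{
    switch (op) {
    case OP_ADD:     *is_commutative = 1; return add;
    case OP_MUL:     *is_commutative = 1; return mul;
    case OP_SUB:     *is_commutative = 0; return sub;
    case OP_REALDIV: *is_commutative = 0; return realdiv;
    default:         *is_commutative = 0; return NULL;
    }
}

int main(void)
{
    int commutative;
    FunType f = select_bin_op(OP_SUB, &commutative);

    printf("sub(5, 2) = %g, commutative = %d\n", f(5.0f, 2.0f), commutative); /* 3, 0 */
    return 0;
}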
static float add(float src0, float src1)
DNNMathBinaryOperation bin_op
float(* FunType)(float src0, float src1)