[FFmpeg-devel] [PATCH v4] lavc/vvc_mc: R-V V avg w_avg

Rémi Denis-Courmont remi at remlab.net
Wed Jun 26 19:33:39 EEST 2024


On Tuesday, 11 June 2024 at 21.38.10 EEST, uk7b at foxmail.com wrote:
> +#include "libavutil/riscv/asm.S"
> +
> +.macro vsetvlstatic8 w, vlen, is_w
> +        .if \w <= 2
> +                vsetivli        zero, \w, e8, mf8, ta, ma
> +        .elseif \w <= 4 && \vlen == 128
> +                vsetivli        zero, \w, e8, mf4, ta, ma
> +        .elseif \w <= 4 && \vlen >= 256
> +                vsetivli        zero, \w, e8, mf8, ta, ma
> +        .elseif \w <= 8 && \vlen == 128
> +                vsetivli        zero, \w, e8, mf2, ta, ma
> +        .elseif \w <= 8 && \vlen >= 256
> +                vsetivli        zero, \w, e8, mf4, ta, ma
> +        .elseif \w <= 16 && \vlen == 128
> +                vsetivli        zero, \w, e8, m1, ta, ma
> +        .elseif \w <= 16 && \vlen >= 256
> +                vsetivli        zero, \w, e8, mf2, ta, ma
> +        .elseif \w <= 32 && \vlen >= 256
> +                li t0, \w
> +                vsetvli         zero, t0, e8, m1, ta, ma
> +        .elseif \w <= (\vlen / 4) || \is_w
> +                li t0, 64
> +                vsetvli         zero, t0, e8, m2, ta, ma

I am not sure what is_w means or what purpose it serves here. If you need
special cases, they feel a bit out of place in this macro.
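
For instance, the callers could make the case explicit and the macro could
lose the parameter altogether. Untested sketch of what the w_avg call sites
might do instead of vsetvlstatic8 \w, \vlen, 1:

        .if \w <= (\vlen / 4)
                vsetvlstatic8   \w, \vlen
        .else
                li              t0, 64
                vsetvli         zero, t0, e8, m2, ta, ma
        .endif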

> +        .else
> +                li t0, \w
> +                vsetvli         zero, t0, e8, m4, ta, ma
> +        .endif
> +.endm
> +
> +.macro vsetvlstatic16 w, vlen, is_w
> +        .if \w <= 2
> +                vsetivli        zero, \w, e16, mf4, ta, ma
> +        .elseif \w <= 4 && \vlen == 128
> +                vsetivli        zero, \w, e16, mf2, ta, ma
> +        .elseif \w <= 4 && \vlen >= 256
> +                vsetivli        zero, \w, e16, mf4, ta, ma
> +        .elseif \w <= 8 && \vlen == 128
> +                vsetivli        zero, \w, e16, m1, ta, ma
> +        .elseif \w <= 8 && \vlen >= 256
> +                vsetivli        zero, \w, e16, mf2, ta, ma
> +        .elseif \w <= 16 && \vlen == 128
> +                vsetivli        zero, \w, e16, m2, ta, ma
> +        .elseif \w <= 16 && \vlen >= 256
> +                vsetivli        zero, \w, e16, m1, ta, ma
> +        .elseif \w <= 32 && \vlen >= 256
> +                li t0, \w
> +                vsetvli         zero, t0, e16, m2, ta, ma
> +        .elseif \w <= (\vlen / 4) || \is_w
> +                li t0, 64
> +                vsetvli         zero, t0, e16, m4, ta, ma
> +        .else
> +                li t0, \w
> +                vsetvli         zero, t0, e16, m8, ta, ma
> +        .endif
> +.endm
> +
> +.macro vsetvlstatic32 w, vlen
> +        .if \w <= 2
> +                vsetivli        zero, \w, e32, mf2, ta, ma
> +        .elseif \w <= 4 && \vlen == 128
> +                vsetivli        zero, \w, e32, m1, ta, ma
> +        .elseif \w <= 4 && \vlen >= 256
> +                vsetivli        zero, \w, e32, mf2, ta, ma
> +        .elseif \w <= 8 && \vlen == 128
> +                vsetivli        zero, \w, e32, m2, ta, ma
> +        .elseif \w <= 8 && \vlen >= 256
> +                vsetivli        zero, \w, e32, m1, ta, ma
> +        .elseif \w <= 16 && \vlen == 128
> +                vsetivli        zero, \w, e32, m4, ta, ma
> +        .elseif \w <= 16 && \vlen >= 256
> +                vsetivli        zero, \w, e32, m2, ta, ma
> +        .elseif \w <= 32 && \vlen >= 256
> +                li t0, \w
> +                vsetvli         zero, t0, e32, m4, ta, ma
> +        .else
> +                li t0, \w
> +                vsetvli         zero, t0, e32, m8, ta, ma
> +        .endif
> +.endm
> +
> +.macro avg_nx1 w, vlen
> +        vsetvlstatic16    \w, \vlen, 0
> +        vle16.v           v0, (a2)
> +        vle16.v           v8, (a3)
> +        vadd.vv           v8, v8, v0
> +        vmax.vx           v8, v8, zero
> +        vsetvlstatic8     \w, \vlen, 0
> +        vnclipu.wi        v8, v8, 7
> +        vse8.v            v8, (a0)
> +.endm
> +
> +.macro avg w, vlen, id
> +\id\w\vlen:

For named labels, it is preferable to use func (perhaps with .variant_cc), so
that all the symbol properties come out right.
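
Untested sketch for one entry point (names illustrative, and assuming the
func macro from asm.S is also suitable for internal entry points):

        .variant_cc   avg_rvv_128_w2
func avg_rvv_128_w2, zve32x
        ...
endfunc

.variant_cc marks the symbol as following a non-standard (here vector)
calling convention.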

> +.if \w < 128
> +        vsetvlstatic16    \w, \vlen, 0
> +        addi              t0, a2, 128*2
> +        addi              t1, a3, 128*2
> +        add               t2, a0, a1
> +        vle16.v           v0, (a2)
> +        vle16.v           v8, (a3)
> +        addi              a5, a5, -2
> +        vle16.v           v16, (t0)
> +        vle16.v           v24, (t1)
> +        vadd.vv           v8, v8, v0
> +        vadd.vv           v24, v24, v16
> +        vmax.vx           v8, v8, zero
> +        vmax.vx           v24, v24, zero
> +        vsetvlstatic8     \w, \vlen, 0
> +        addi              a2, a2, 128*4
> +        vnclipu.wi        v8, v8, 7
> +        vnclipu.wi        v24, v24, 7
> +        addi              a3, a3, 128*4
> +        vse8.v            v8, (a0)
> +        vse8.v            v24, (t2)
> +        sh1add            a0, a1, a0
> +.else
> +        avg_nx1           128, \vlen
> +        addi              a5, a5, -1
> +        .if \vlen == 128
> +        addi              a2, a2, 64*2
> +        addi              a3, a3, 64*2
> +        addi              a0, a0, 64
> +        avg_nx1           128, \vlen
> +        addi              a0, a0, -64
> +        addi              a2, a2, 128
> +        addi              a3, a3, 128
> +        .else
> +        addi              a2, a2, 128*2
> +        addi              a3, a3, 128*2
> +        .endif
> +        add               a0, a0, a1

I am not sure if I get it, but it seems like this could be a normal
strip-mined vector processing loop without specialisation for the vector
length, at (almost) no performance cost.
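
Untested sketch of such a strip-mined inner loop across one row (registers
illustrative, with a4 holding the number of elements left in the row):

1:
        vsetvli           t0, a4, e16, m8, ta, ma  // t0 = elements this pass
        vle16.v           v0, (a2)
        vle16.v           v8, (a3)
        vadd.vv           v8, v8, v0
        vmax.vx           v8, v8, zero
        vsetvli           zero, zero, e8, m4, ta, ma // keep vl, narrow to e8
        vnclipu.wi        v8, v8, 7
        vse8.v            v8, (a0)
        sub               a4, a4, t0
        sh1add            a2, t0, a2               // int16_t sources
        sh1add            a3, t0, a3
        add               a0, a0, t0
        bnez              a4, 1b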

> +.endif
> +        bnez              a5, \id\w\vlen\()b
> +        ret
> +.endm
> +
> +
> +.macro AVG_JMP_TABLE id, vlen
> +const jmp_table_\id\vlen
> +        .4byte \id\()2\vlen\()f - jmp_table_\id\vlen
> +        .4byte \id\()4\vlen\()f - jmp_table_\id\vlen
> +        .4byte \id\()8\vlen\()f - jmp_table_\id\vlen
> +        .4byte \id\()16\vlen\()f - jmp_table_\id\vlen
> +        .4byte \id\()32\vlen\()f - jmp_table_\id\vlen
> +        .4byte \id\()64\vlen\()f - jmp_table_\id\vlen
> +        .4byte \id\()128\vlen\()f - jmp_table_\id\vlen
> +endconst
> +.endm
> +
> +.macro AVG_J vlen, id
> +        clz               a4, a4
> +        neg               a4, a4
> +        lla               t5, jmp_table_\id\vlen
> +        sh2add            t0, a4, t5
> +        lw                t0, ((__riscv_xlen-2)<<2)(t0)
> +        add               t0, t0, t5
> +        jr                t0

t0 is a link register, so the branch predictor will treat this as a return,
but it seems to be a tail call instead.
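
Only ra/x1 and t0/x5 participate in return-address prediction, so using
another temporary avoids the bogus hint, e.g. (sketch, register choice
illustrative):

        lla               t5, jmp_table_\id\vlen
        sh2add            t1, a4, t5
        lw                t1, ((__riscv_xlen-2)<<2)(t1)
        add               t1, t1, t5
        jr                t1                       // plain indirect jump hint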

> +.endm
> +
> +.macro func_avg vlen
> +func ff_vvc_avg_8_rvv_\vlen\(), zve32x
> +        AVG_JMP_TABLE     1, \vlen
> +        csrwi             vxrm, 0
> +        AVG_J             \vlen, 1
> +        .irp w,2,4,8,16,32,64,128
> +        avg               \w, \vlen, 1
> +        .endr
> +endfunc
> +.endm
> +
> +.macro w_avg_nx1 w, vlen
> +        vsetvlstatic16    \w, \vlen, 1
> +        vle16.v           v0, (a2)
> +        vle16.v           v8, (a3)
> +        vwmul.vx          v16, v0, a7
> +        vwmacc.vx         v16, t3, v8
> +        vsetvlstatic32    \w, \vlen
> +        vadd.vx           v16, v16, t4

Could t4 be added in 16-bit mode, so that we don't need to switch vtype?
(The same applies below.)
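
As far as I can tell, this computes
clip_uint8((w0*src0 + w1*src1 + ((o0 + o1 + 1) << (shift - 1))) >> shift)
with shift = denom + 7, so the sum genuinely needs 32 bits. But the offset
could be preloaded into the 32-bit accumulator before the widening
multiply-accumulates, saving one of the vtype switches. Untested sketch:

        vsetvlstatic32    \w, \vlen
        vmv.v.x           v16, t4                  // start from the offset
        vsetvlstatic16    \w, \vlen, 1
        vle16.v           v0, (a2)
        vle16.v           v8, (a3)
        vwmacc.vx         v16, a7, v0              // += w0 * src0
        vwmacc.vx         v16, t3, v8              // += w1 * src1
        vnsrl.wx          v16, v16, t6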

> +        vsetvlstatic16    \w, \vlen, 1
> +        vnsrl.wx          v16, v16, t6
> +        vmax.vx           v16, v16, zero
> +        vsetvlstatic8     \w, \vlen, 1
> +        vnclipu.wi        v16, v16, 0
> +        vse8.v            v16, (a0)
> +.endm
> +
> +.macro w_avg w, vlen, id
> +\id\w\vlen:

Same as above.

> +.if \vlen <= 16
> +        vsetvlstatic16    \w, \vlen, 1
> +        addi              t0, a2, 128*2
> +        addi              t1, a3, 128*2
> +        vle16.v           v0, (a2)
> +        vle16.v           v8, (a3)
> +        addi              a5, a5, -2
> +        vle16.v           v20, (t0)
> +        vle16.v           v24, (t1)
> +        vwmul.vx          v16, v0, a7
> +        vwmul.vx          v28, v20, a7
> +        vwmacc.vx         v16, t3, v8
> +        vwmacc.vx         v28, t3, v24
> +        vsetvlstatic32    \w, \vlen
> +        add               t2, a0, a1
> +        vadd.vx           v16, v16, t4
> +        vadd.vx           v28, v28, t4
> +        vsetvlstatic16    \w, \vlen, 1
> +        vnsrl.wx          v16, v16, t6
> +        vnsrl.wx          v28, v28, t6
> +        vmax.vx           v16, v16, zero
> +        vmax.vx           v28, v28, zero
> +        vsetvlstatic8     \w, \vlen, 1
> +        addi              a2, a2, 128*4
> +        vnclipu.wi        v16, v16, 0
> +        vnclipu.wi        v28, v28, 0
> +        vse8.v            v16, (a0)
> +        addi              a3, a3, 128*4
> +        vse8.v            v28, (t2)
> +        sh1add            a0, a1, a0
> +.else
> +        w_avg_nx1         \w, \vlen
> +        addi              a5, a5, -1
> +        .if \w == (\vlen / 2)
> +        addi              a2, a2, (\vlen / 2)
> +        addi              a3, a3, (\vlen / 2)
> +        addi              a0, a0, (\vlen / 4)
> +        w_avg_nx1         \w, \vlen
> +        addi              a2, a2, -(\vlen / 2)
> +        addi              a3, a3, -(\vlen / 2)
> +        addi              a0, a0, -(\vlen / 4)
> +        .elseif \w == 128 && \vlen == 128
> +        .rept 3
> +        addi              a2, a2, (\vlen / 2)
> +        addi              a3, a3, (\vlen / 2)
> +        addi              a0, a0, (\vlen / 4)
> +        w_avg_nx1         \w, \vlen

Can we use a regular loop here instead of repeating the same code?
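
Untested sketch, assuming t2 is free at this point (the compensating addis
after the loop would stay as they are):

        li                t2, 3
1:
        addi              a2, a2, (\vlen / 2)
        addi              a3, a3, (\vlen / 2)
        addi              a0, a0, (\vlen / 4)
        w_avg_nx1         \w, \vlen
        addi              t2, t2, -1
        bnez              t2, 1b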

> +        .endr
> +        addi              a2, a2, -(\vlen / 2) * 3
> +        addi              a3, a3, -(\vlen / 2) * 3
> +        addi              a0, a0, -(\vlen / 4) * 3
> +        .endif
> +
> +        addi              a2, a2, 128*2
> +        addi              a3, a3, 128*2
> +        add               a0, a0, a1
> +.endif
> +        bnez              a5, \id\w\vlen\()b
> +        ret
> +.endm
> +
> +
> +.macro func_w_avg vlen
> +func ff_vvc_w_avg_8_rvv_\vlen\(), zve32x
> +        AVG_JMP_TABLE     2, \vlen
> +        csrwi             vxrm, 0
> +        addi              t6, a6, 7
> +        ld                t3, (sp)
> +        ld                t4, 8(sp)
> +        ld                t5, 16(sp)
> +        addi              t4, t4, 1       // o0 + o1 + 1
> +        add               t4, t4, t5
> +        addi              t5, t6, -1      // shift - 1
> +        sll               t4, t4, t5
> +        AVG_J             \vlen, 2
> +        .irp w,2,4,8,16,32,64,128
> +        w_avg             \w, \vlen, 2
> +        .endr
> +endfunc
> +.endm
> +
> +func_avg 128
> +func_avg 256
> +#if (__riscv_xlen == 64)
> +func_w_avg 128
> +func_w_avg 256
> +#endif
> diff --git a/libavcodec/riscv/vvc/vvcdsp_init.c b/libavcodec/riscv/vvc/vvcdsp_init.c
> new file mode 100644
> index 0000000000..9819a7c570
> --- /dev/null
> +++ b/libavcodec/riscv/vvc/vvcdsp_init.c
> @@ -0,0 +1,72 @@
> +/*
> + * Copyright (c) 2024 Institute of Software, Chinese Academy of Sciences (ISCAS).
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "config.h"
> +
> +#include "libavutil/attributes.h"
> +#include "libavutil/cpu.h"
> +#include "libavutil/riscv/cpu.h"
> +#include "libavcodec/vvc/dsp.h"
> +
> +#define bf(fn, bd,  opt) fn##_##bd##_##opt
> +
> +#define AVG_PROTOTYPES(bd, opt)                                                                      \
> +void bf(ff_vvc_avg, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride,                                     \
> +    const int16_t *src0, const int16_t *src1, int width, int height);                               \
> +void bf(ff_vvc_w_avg, bd, opt)(uint8_t *dst, ptrdiff_t dst_stride,                                   \
> +    const int16_t *src0, const int16_t *src1, int width, int height,                                 \
> +    int denom, int w0, int w1, int o0, int o1);
> +
> +AVG_PROTOTYPES(8, rvv_128)
> +AVG_PROTOTYPES(8, rvv_256)
> +
> +void ff_vvc_dsp_init_riscv(VVCDSPContext *const c, const int bd)
> +{
> +#if HAVE_RVV
> +    const int flags = av_get_cpu_flags();
> +    int vlenb = ff_get_rv_vlenb();
> +
> +    if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB_ADDR) &&
> +        vlenb >= 32) {
> +        switch (bd) {
> +            case 8:
> +                c->inter.avg    = ff_vvc_avg_8_rvv_256;
> +# if (__riscv_xlen == 64)
> +                c->inter.w_avg    = ff_vvc_w_avg_8_rvv_256;
> +# endif
> +                break;
> +            default:
> +                break;
> +        }
> +    } else if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB_ADDR) &&
> +               vlenb >= 16) {
> +        switch (bd) {
> +            case 8:
> +                c->inter.avg    = ff_vvc_avg_8_rvv_128;
> +# if (__riscv_xlen == 64)
> +                c->inter.w_avg    = ff_vvc_w_avg_8_rvv_128;
> +# endif
> +                break;
> +            default:
> +                break;
> +        }
> +    }
> +#endif
> +}
> diff --git a/libavcodec/vvc/dsp.c b/libavcodec/vvc/dsp.c
> index 41e830a98a..c55a37d255 100644
> --- a/libavcodec/vvc/dsp.c
> +++ b/libavcodec/vvc/dsp.c
> @@ -121,7 +121,9 @@ void ff_vvc_dsp_init(VVCDSPContext *vvcdsp, int bit_depth)
>          break;
>      }
> 
> -#if ARCH_X86
> +#if ARCH_RISCV
> +    ff_vvc_dsp_init_riscv(vvcdsp, bit_depth);
> +#elif ARCH_X86
>      ff_vvc_dsp_init_x86(vvcdsp, bit_depth);
>  #endif
>  }
> diff --git a/libavcodec/vvc/dsp.h b/libavcodec/vvc/dsp.h
> index 1f14096c41..e03236dd76 100644
> --- a/libavcodec/vvc/dsp.h
> +++ b/libavcodec/vvc/dsp.h
> @@ -180,6 +180,7 @@ typedef struct VVCDSPContext {
> 
>  void ff_vvc_dsp_init(VVCDSPContext *hpc, int bit_depth);
> 
> +void ff_vvc_dsp_init_riscv(VVCDSPContext *hpc, const int bit_depth);
>  void ff_vvc_dsp_init_x86(VVCDSPContext *hpc, const int bit_depth);
> 
>  #endif /* AVCODEC_VVC_DSP_H */


-- 
雷米‧德尼-库尔蒙
http://www.remlab.net/