[FFmpeg-devel] [PATCH 4/4] avcodec/mips: MSA (MIPS-SIMD-Arch) optimizations for VP9 intra functions

Ronald S. Bultje rsbultje at gmail.com
Thu Jul 16 17:18:57 CEST 2015


Hi,

On Thu, Jul 9, 2015 at 9:15 AM, <shivraj.patil at imgtec.com> wrote:

> +static void intra_predict_vert_4x4_msa(const uint8_t *src, uint8_t *dst,
> +                                       int32_t dst_stride)
> +{
> +    uint32_t src_data;
> +
> +    src_data = LW(src);
> +
> +    SW4(src_data, src_data, src_data, src_data, dst, dst_stride);
> +}
>

Is this faster than the C function? I know this is a fair bit of work, but
ideally you'd profile each individual SIMD function to see how much faster
it is than its C counterpart. These won't be faster, so they just increase
the binary size. The same is likely true for e.g. the vert_8x8 one.
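For reference, a minimal way to get per-function numbers is
START_TIMER/STOP_TIMER from libavutil/timer.h. This is only a sketch: the
harness and the name vert_4x4_c (standing in for the existing C fallback)
are placeholders, not code from this patch:

#include "libavutil/timer.h"

/* Run both candidates on the same data; STOP_TIMER prints running
 * cycle statistics per id. */
static void bench_vert_4x4(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    int i;

    for (i = 0; i < 1 << 16; i++) {
        {
            START_TIMER
            intra_predict_vert_4x4_msa(top, dst, stride);
            STOP_TIMER("vert_4x4_msa")
        }
        {
            START_TIMER
            vert_4x4_c(dst, stride, left, top); /* placeholder for the C path */
            STOP_TIMER("vert_4x4_c")
        }
    }
}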

> +static void intra_predict_horiz_4x4_msa(const uint8_t *src, int32_t src_stride,
> +                                        uint8_t *dst, int32_t dst_stride)
> +{
> +    uint32_t out0, out1, out2, out3;
> +
> +    out0 = src[0 * src_stride] * 0x01010101;
> +    out1 = src[1 * src_stride] * 0x01010101;
> +    out2 = src[2 * src_stride] * 0x01010101;
> +    out3 = src[3 * src_stride] * 0x01010101;
> +
> +    SW4(out0, out1, out2, out3, dst, dst_stride);
> +}
>

Same question here - I suspect this isn't faster than the C version. Same
for horiz_8x8.
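
For comparison, the existing C fallback for this one is already just four
fills. Roughly (a from-memory sketch of what the 8bpp output of
libavcodec/vp9dsp_template.c boils down to, not a verbatim copy):

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* left[] is passed bottom-to-top in FFmpeg's vp9 code, hence left[3]
 * filling the first row; this matches the 'left + 3, -1' arguments in
 * the ff_hor_4x4_msa wrapper further down. */
static void hor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                      const uint8_t *left, const uint8_t *top)
{
    memset(dst + stride * 0, left[3], 4);
    memset(dst + stride * 1, left[2], 4);
    memset(dst + stride * 2, left[1], 4);
    memset(dst + stride * 3, left[0], 4);
}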


> +static void intra_predict_dc_4x4_msa(const uint8_t *src_top,
> +                                     const uint8_t *src_left,
> +                                     int32_t src_stride_left,
> +                                     uint8_t *dst, int32_t dst_stride,
> +                                     uint8_t is_above, uint8_t is_left)
> +{
> +    uint32_t row;
> +    uint32_t out, addition = 0;
> +    v16u8 src_above, store;
> +    v8u16 sum_above;
> +    v4u32 sum;
> +
> +    if (is_left && is_above) {
> +        src_above = LD_UB(src_top);
> +
> +        sum_above = __msa_hadd_u_h(src_above, src_above);
> +        sum = __msa_hadd_u_w(sum_above, sum_above);
> +        addition = __msa_copy_u_w((v4i32) sum, 0);
> +
> +        for (row = 0; row < 4; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 4) >> 3;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_left) {
> +        for (row = 0; row < 4; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 2) >> 2;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_above) {
> +        src_above = LD_UB(src_top);
> +
> +        sum_above = __msa_hadd_u_h(src_above, src_above);
> +        sum = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = (v4u32) __msa_srari_w((v4i32) sum, 2);
> +        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
> +    } else {
> +        store = (v16u8) __msa_ldi_b(128);
> +    }
> +
> +    out = __msa_copy_u_w((v4i32) store, 0);
> +
> +    for (row = 4; row--;) {
> +        SW(out, dst);
> +        dst += dst_stride;
> +    }
> +}
> +
> +static void intra_predict_dc_8x8_msa(const uint8_t *src_top,
> +                                     const uint8_t *src_left,
> +                                     int32_t src_stride_left,
> +                                     uint8_t *dst, int32_t dst_stride,
> +                                     uint8_t is_above, uint8_t is_left)
> +{
> +    uint32_t row;
> +    uint32_t out, addition = 0;
> +    v16u8 src_above, store;
> +    v8u16 sum_above;
> +    v4u32 sum_top;
> +    v2u64 sum;
> +
> +    if (is_left && is_above) {
> +        src_above = LD_UB(src_top);
> +
> +        sum_above = __msa_hadd_u_h(src_above, src_above);
> +        sum_top = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        addition = __msa_copy_u_w((v4i32) sum, 0);
> +
> +        for (row = 0; row < 8; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 8) >> 4;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_left) {
> +        for (row = 0; row < 8; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 4) >> 3;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_above) {
> +        src_above = LD_UB(src_top);
> +
> +        sum_above = __msa_hadd_u_h(src_above, src_above);
> +        sum_top = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum = (v2u64) __msa_srari_d((v2i64) sum, 3);
> +        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
> +    } else {
> +        store = (v16u8) __msa_ldi_b(128);
> +    }
> +
> +    out = __msa_copy_u_w((v4i32) store, 0);
> +
> +    for (row = 8; row--;) {
> +        SW(out, dst);
> +        SW(out, (dst + 4));
> +        dst += dst_stride;
> +    }
> +}
> +
> +static void intra_predict_dc_16x16_msa(const uint8_t *src_top,
> +                                       const uint8_t *src_left,
> +                                       int32_t src_stride_left,
> +                                       uint8_t *dst, int32_t dst_stride,
> +                                       uint8_t is_above, uint8_t is_left)
> +{
> +    uint32_t row;
> +    uint32_t addition = 0;
> +    v16u8 src_above, store;
> +    v8u16 sum_above;
> +    v4u32 sum_top;
> +    v2u64 sum;
> +
> +    if (is_left && is_above) {
> +        src_above = LD_UB(src_top);
> +
> +        sum_above = __msa_hadd_u_h(src_above, src_above);
> +        sum_top = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        addition = __msa_copy_u_w((v4i32) sum, 0);
> +
> +        for (row = 0; row < 16; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 16) >> 5;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_left) {
> +        for (row = 0; row < 16; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 8) >> 4;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_above) {
> +        src_above = LD_UB(src_top);
> +
> +        sum_above = __msa_hadd_u_h(src_above, src_above);
> +        sum_top = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum = (v2u64) __msa_srari_d((v2i64) sum, 4);
> +        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
> +    } else {
> +        store = (v16u8) __msa_ldi_b(128);
> +    }
> +
> +    for (row = 16; row--;) {
> +        ST_UB(store, dst);
> +        dst += dst_stride;
> +    }
> +}
> +
> +static void intra_predict_dc_32x32_msa(const uint8_t *src_top,
> +                                       const uint8_t *src_left,
> +                                       int32_t src_stride_left,
> +                                       uint8_t *dst, int32_t dst_stride,
> +                                       uint8_t is_above, uint8_t is_left)
> +{
> +    uint32_t row;
> +    uint32_t addition = 0;
> +    v16u8 src_above1, src_above2, store;
> +    v8u16 sum_above1, sum_above2, sum_above;
> +    v4u32 sum_top;
> +    v2u64 sum;
> +
> +    if (is_left && is_above) {
> +        src_above1 = LD_UB(src_top);
> +        src_above2 = LD_UB(src_top + 16);
> +
> +        HADD_UB2_UH(src_above1, src_above2, sum_above1, sum_above2);
> +
> +        sum_above = sum_above1 + sum_above2;
> +        sum_top = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        addition = __msa_copy_u_w((v4i32) sum, 0);
> +
> +        for (row = 0; row < 32; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 32) >> 6;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_left) {
> +        for (row = 0; row < 32; row++) {
> +            addition += src_left[row * src_stride_left];
> +        }
> +
> +        addition = (addition + 16) >> 5;
> +        store = (v16u8) __msa_fill_b(addition);
> +    } else if (is_above) {
> +        src_above1 = LD_UB(src_top);
> +        src_above2 = LD_UB(src_top + 16);
> +
> +        HADD_UB2_UH(src_above1, src_above2, sum_above1, sum_above2);
> +
> +        sum_above = sum_above1 + sum_above2;
> +        sum_top = __msa_hadd_u_w(sum_above, sum_above);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum_top = (v4u32) __msa_pckev_w((v4i32) sum, (v4i32) sum);
> +        sum = __msa_hadd_u_d(sum_top, sum_top);
> +        sum = (v2u64) __msa_srari_d((v2i64) sum, 5);
> +        store = (v16u8) __msa_splati_b((v16i8) sum, 0);
> +    } else {
> +        store = (v16u8) __msa_ldi_b(128);
> +    }
> +    for (row = 32; row--;) {
> +        ST_UB2(store, store, dst, 16);
> +        dst += dst_stride;
> +    }
> +}
> +
> +#define INTRA_PREDICT_VALDC_4X4_MSA(val)                         \
> +static void intra_predict_##val##dc_4x4_msa(uint8_t *dst,        \
> +                                            int32_t dst_stride)  \
> +{                                                                \
> +    uint32_t row, out;                                           \
> +    v16i8 store;                                                 \
> +                                                                 \
> +    store = __msa_ldi_b(val);                                    \
> +    out = __msa_copy_u_w((v4i32) store, 0);                      \
> +                                                                 \
> +    for (row = 4; row--;)                                        \
> +    {                                                            \
> +        SW(out, dst);                                            \
> +        dst += dst_stride;                                       \
> +    }                                                            \
> +}
> +
> +INTRA_PREDICT_VALDC_4X4_MSA(127);
> +INTRA_PREDICT_VALDC_4X4_MSA(129);
> +
> +#define INTRA_PREDICT_VALDC_8X8_MSA(val)                         \
> +static void intra_predict_##val##dc_8x8_msa(uint8_t *dst,        \
> +                                            int32_t dst_stride)  \
> +{                                                                \
> +    uint32_t row, out;                                           \
> +    v16i8 store;                                                 \
> +                                                                 \
> +    store = __msa_ldi_b(val);                                    \
> +    out = __msa_copy_u_w((v4i32) store, 0);                      \
> +                                                                 \
> +    for (row = 8; row--;)                                        \
> +    {                                                            \
> +        SW(out, dst);                                            \
> +        SW(out, (dst + 4));                                      \
> +        dst += dst_stride;                                       \
> +    }                                                            \
> +}
> +
> +INTRA_PREDICT_VALDC_8X8_MSA(127);
> +INTRA_PREDICT_VALDC_8X8_MSA(129);
>

You've looked too much at corporate H.264 decoders (maybe H.265 also). FFmpeg
gives the edge dc versions (and the fixed-value dc versions) their own
indices in the prediction table, so the generic dc version (where both the
above and left edges are available) does not need any special branches (your
function also isn't marked as inline, so the constant is_above/is_left
arguments from the wrappers won't reliably be folded away either).

I also don't see why you have special versions for dc127/dc129, but don't
use the same approach for dc128.
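
To illustrate what I mean, here is a sketch of the existing table
convention (enum names from libavcodec/vp9.h; the init function name is
illustrative):

static av_cold void vp9dsp_intrapred_init_msa(VP9DSPContext *dsp)
{
    /* each dc variant gets its own slot, so none of them needs any
     * run-time is_above/is_left branches */
    dsp->intra_pred[TX_4X4][DC_PRED]      = ff_dc_4x4_msa;      /* both edges */
    dsp->intra_pred[TX_4X4][LEFT_DC_PRED] = ff_dc_left_4x4_msa; /* left only  */
    dsp->intra_pred[TX_4X4][TOP_DC_PRED]  = ff_dc_top_4x4_msa;  /* top only   */
    dsp->intra_pred[TX_4X4][DC_128_PRED]  = ff_dc_128_4x4_msa;
    dsp->intra_pred[TX_4X4][DC_127_PRED]  = ff_dc_127_4x4_msa;
    dsp->intra_pred[TX_4X4][DC_129_PRED]  = ff_dc_129_4x4_msa;
}

With that split, the body of the generic dc function can assume both edges
are present, and dc128 falls out of the same fixed-value macro as
dc127/dc129.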

> +void ff_vert_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                     const uint8_t *top)
> +{
> +    intra_predict_vert_4x4_msa(top, dst, stride);
> +}
> +
> +void ff_vert_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                     const uint8_t *top)
> +{
> +    intra_predict_vert_8x8_msa(top, dst, stride);
> +}
> +
> +void ff_vert_16x16_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_vert_16x16_msa(top, dst, stride);
> +}
> +
> +void ff_vert_32x32_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_vert_32x32_msa(top, dst, stride);
> +}
> +
> +void ff_hor_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                    const uint8_t *top)
> +{
> +    intra_predict_horiz_4x4_msa(left + 3, -1, dst, stride);
> +}
> +
> +void ff_hor_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                    const uint8_t *top)
> +{
> +    intra_predict_horiz_8x8_msa(left + 7, -1, dst, stride);
> +}
> +
> +void ff_hor_16x16_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                      const uint8_t *top)
> +{
> +    intra_predict_horiz_16x16_msa(left + 15, -1, dst, stride);
> +}
> +
> +void ff_hor_32x32_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                      const uint8_t *top)
> +{
> +    intra_predict_horiz_32x32_msa(left + 31, -1, dst, stride);
> +}
> +
> +void ff_dc_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                   const uint8_t *top)
> +{
> +    intra_predict_dc_4x4_msa(top, left, 1, dst, stride, 1, 1);
> +}
> +
> +void ff_dc_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                   const uint8_t *top)
> +{
> +    intra_predict_dc_8x8_msa(top, left, 1, dst, stride, 1, 1);
> +}
> +
> +void ff_dc_16x16_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                  const uint8_t *top)
> +{
> +    intra_predict_dc_16x16_msa(top, left, 1, dst, stride, 1, 1);
> +}
> +
> +void ff_dc_32x32_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                     const uint8_t *top)
> +{
> +    intra_predict_dc_32x32_msa(top, left, 1, dst, stride, 1, 1);
> +}
> +
> +void ff_dc_left_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                        const uint8_t *top)
> +{
> +    intra_predict_dc_4x4_msa(top, left, 1, dst, stride, 0, 1);
> +}
> +
> +void ff_dc_left_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                        const uint8_t *top)
> +{
> +    intra_predict_dc_8x8_msa(top, left, 1, dst, stride, 0, 1);
> +}
> +
> +void ff_dc_left_16x16_msa(uint8_t *dst, ptrdiff_t stride,
> +                          const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_dc_16x16_msa(top, left, 1, dst, stride, 0, 1);
> +}
> +
> +void ff_dc_left_32x32_msa(uint8_t *dst, ptrdiff_t stride,
> +                          const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_dc_32x32_msa(top, left, 1, dst, stride, 0, 1);
> +}
> +
> +void ff_dc_top_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_dc_4x4_msa(top, left, 1, dst, stride, 1, 0);
> +}
> +
> +void ff_dc_top_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_dc_8x8_msa(top, left, 1, dst, stride, 1, 0);
> +}
> +
> +void ff_dc_top_16x16_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_dc_16x16_msa(top, left, 1, dst, stride, 1, 0);
> +}
> +
> +void ff_dc_top_32x32_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_dc_32x32_msa(top, left, 1, dst, stride, 1, 0);
> +}
> +
> +void ff_dc_128_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_dc_4x4_msa(top, left, 1, dst, stride, 0, 0);
> +}
> +
> +void ff_dc_128_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_dc_8x8_msa(top, left, 1, dst, stride, 0, 0);
> +}
> +
> +void ff_dc_128_16x16_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_dc_16x16_msa(top, left, 1, dst, stride, 0, 0);
> +}
> +
> +void ff_dc_128_32x32_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_dc_32x32_msa(top, left, 1, dst, stride, 0, 0);
> +}
> +
> +void ff_dc_127_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_127dc_4x4_msa(dst, stride);
> +}
> +
> +void ff_dc_127_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_127dc_8x8_msa(dst, stride);
> +}
> +
> +void ff_dc_127_16x16_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_127dc_16x16_msa(dst, stride);
> +}
> +
> +void ff_dc_127_32x32_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_127dc_32x32_msa(dst, stride);
> +}
> +
> +void ff_dc_129_4x4_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_129dc_4x4_msa(dst, stride);
> +}
> +
> +void ff_dc_129_8x8_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> +                       const uint8_t *top)
> +{
> +    intra_predict_129dc_8x8_msa(dst, stride);
> +}
> +
> +void ff_dc_129_16x16_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_129dc_16x16_msa(dst, stride);
> +}
> +
> +void ff_dc_129_32x32_msa(uint8_t *dst, ptrdiff_t stride,
> +                         const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_129dc_32x32_msa(dst, stride);
> +}
> +
> +void ff_tm_4x4_msa(uint8_t *dst, ptrdiff_t stride,
> +                   const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_tm_4x4_msa(top, left + 3, -1, dst, stride);
> +}
> +
> +void ff_tm_8x8_msa(uint8_t *dst, ptrdiff_t stride,
> +                   const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_tm_8x8_msa(top, left + 7, -1, dst, stride);
> +}
> +
> +void ff_tm_16x16_msa(uint8_t *dst, ptrdiff_t stride,
> +                     const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_tm_16x16_msa(top, left + 15, -1, dst, stride);
> +}
> +
> +void ff_tm_32x32_msa(uint8_t *dst, ptrdiff_t stride,
> +                     const uint8_t *left, const uint8_t *top)
> +{
> +    intra_predict_tm_32x32_msa(top, left + 31, -1, dst, stride);
> +}


Why do you need these wrappers?
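
If it's only about the argument order, the MSA functions could take the
dsp-table prototype directly. A sketch for the vert_4x4 case (LW/SW4 as in
the patch's macro headers):

void ff_vert_4x4_msa(uint8_t *dst, ptrdiff_t stride,
                     const uint8_t *left, const uint8_t *top)
{
    uint32_t src_data = LW(top);

    SW4(src_data, src_data, src_data, src_data, dst, stride);
}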

Ronald

