[FFmpeg-devel] [PATCH 2/2] avcodec/mips: MSA (MIPS-SIMD-Arch) optimizations for HEVC horizontal and vertical mc functions

shivraj.patil at imgtec.com
Fri Apr 17 15:12:28 CEST 2015


From: Shivraj Patil <shivraj.patil at imgtec.com>

Signed-off-by: Shivraj Patil <shivraj.patil at imgtec.com>
---
 libavcodec/hevcdsp.c                |    2 +
 libavcodec/hevcdsp.h                |    1 +
 libavcodec/mips/Makefile            |    2 +
 libavcodec/mips/hevcdsp_init_mips.c |   54 ++
 libavcodec/mips/hevcdsp_mips.h      |   49 ++
 libavcodec/mips/hevcdsp_msa.c       | 1259 +++++++++++++++++++++++++++++++++++
 libavutil/mips/generic_macros_msa.h |  285 ++++++++
 7 files changed, 1652 insertions(+)
 create mode 100644 libavcodec/mips/hevcdsp_init_mips.c
 create mode 100644 libavcodec/mips/hevcdsp_mips.h
 create mode 100644 libavcodec/mips/hevcdsp_msa.c
 create mode 100644 libavutil/mips/generic_macros_msa.h

diff --git a/libavcodec/hevcdsp.c b/libavcodec/hevcdsp.c
index 04af178..be01e92 100644
--- a/libavcodec/hevcdsp.c
+++ b/libavcodec/hevcdsp.c
@@ -261,4 +261,6 @@ int i = 0;
         ff_hevc_dsp_init_x86(hevcdsp, bit_depth);
     if (ARCH_ARM)
         ff_hevcdsp_init_arm(hevcdsp, bit_depth);
+    if (ARCH_MIPS)
+        ff_hevc_dsp_init_mips(hevcdsp, bit_depth);
 }
diff --git a/libavcodec/hevcdsp.h b/libavcodec/hevcdsp.h
index a891ea7..d2ea867 100644
--- a/libavcodec/hevcdsp.h
+++ b/libavcodec/hevcdsp.h
@@ -129,4 +129,5 @@ extern const int8_t ff_hevc_qpel_filters[3][16];
 
 void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth);
 void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth);
+void ff_hevc_dsp_init_mips(HEVCDSPContext *c, const int bit_depth);
 #endif /* AVCODEC_HEVCDSP_H */
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index 8e2459f..b41d8c7 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -18,3 +18,5 @@ OBJS-$(CONFIG_AAC_DECODER)                += mips/aacdec_mips.o            \
                                              mips/aacpsdsp_mips.o
 MIPSDSPR1-OBJS-$(CONFIG_AAC_ENCODER)      += mips/aaccoder_mips.o
 MIPSFPU-OBJS-$(CONFIG_AAC_ENCODER)        += mips/iirfilter_mips.o
+OBJS-$(CONFIG_HEVC_DECODER)               += mips/hevcdsp_init_mips.o
+MSA-OBJS-$(CONFIG_HEVC_DECODER)           += mips/hevcdsp_msa.o
diff --git a/libavcodec/mips/hevcdsp_init_mips.c b/libavcodec/mips/hevcdsp_init_mips.c
new file mode 100644
index 0000000..05ed81f
--- /dev/null
+++ b/libavcodec/mips/hevcdsp_init_mips.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/mips/hevcdsp_mips.h"
+
+#if HAVE_MSA
+static av_cold void hevc_dsp_init_msa(HEVCDSPContext *c,
+                                      const int bit_depth)
+{
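+    /* put_hevc_qpel[idx][vert][horiz]: idx selects the block width
+     * (1 => 4, 3 => 8, 4 => 12, 5 => 16, 6 => 24, 7 => 32, 8 => 48,
+     * 9 => 64); only the pure horizontal ([0][1]) and pure vertical
+     * ([1][0]) qpel cases get MSA versions here. */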
+    if (8 == bit_depth) {
+        c->put_hevc_qpel[1][0][1] = ff_hevc_put_hevc_qpel_h4_8_msa;
+        c->put_hevc_qpel[3][0][1] = ff_hevc_put_hevc_qpel_h8_8_msa;
+        c->put_hevc_qpel[4][0][1] = ff_hevc_put_hevc_qpel_h12_8_msa;
+        c->put_hevc_qpel[5][0][1] = ff_hevc_put_hevc_qpel_h16_8_msa;
+        c->put_hevc_qpel[6][0][1] = ff_hevc_put_hevc_qpel_h24_8_msa;
+        c->put_hevc_qpel[7][0][1] = ff_hevc_put_hevc_qpel_h32_8_msa;
+        c->put_hevc_qpel[8][0][1] = ff_hevc_put_hevc_qpel_h48_8_msa;
+        c->put_hevc_qpel[9][0][1] = ff_hevc_put_hevc_qpel_h64_8_msa;
+
+        c->put_hevc_qpel[1][1][0] = ff_hevc_put_hevc_qpel_v4_8_msa;
+        c->put_hevc_qpel[3][1][0] = ff_hevc_put_hevc_qpel_v8_8_msa;
+        c->put_hevc_qpel[4][1][0] = ff_hevc_put_hevc_qpel_v12_8_msa;
+        c->put_hevc_qpel[5][1][0] = ff_hevc_put_hevc_qpel_v16_8_msa;
+        c->put_hevc_qpel[6][1][0] = ff_hevc_put_hevc_qpel_v24_8_msa;
+        c->put_hevc_qpel[7][1][0] = ff_hevc_put_hevc_qpel_v32_8_msa;
+        c->put_hevc_qpel[8][1][0] = ff_hevc_put_hevc_qpel_v48_8_msa;
+        c->put_hevc_qpel[9][1][0] = ff_hevc_put_hevc_qpel_v64_8_msa;
+    }
+}
+#endif  // #if HAVE_MSA
+
+void ff_hevc_dsp_init_mips(HEVCDSPContext *c, const int bit_depth)
+{
+#if HAVE_MSA
+    hevc_dsp_init_msa(c, bit_depth);
+#endif  // #if HAVE_MSA
+}
diff --git a/libavcodec/mips/hevcdsp_mips.h b/libavcodec/mips/hevcdsp_mips.h
new file mode 100644
index 0000000..13cdb5b
--- /dev/null
+++ b/libavcodec/mips/hevcdsp_mips.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/hevcdsp.h"
+
+#define MC(PEL, DIR, WIDTH)                                                 \
+void ff_hevc_put_hevc_##PEL##_##DIR####WIDTH##_8_msa(int16_t *dst,          \
+                                                     uint8_t *src,          \
+                                                     ptrdiff_t src_stride,  \
+                                                     int height,            \
+                                                     intptr_t mx,           \
+                                                     intptr_t my,           \
+                                                     int width)
+
+MC(qpel, h, 4);
+MC(qpel, h, 8);
+MC(qpel, h, 12);
+MC(qpel, h, 16);
+MC(qpel, h, 24);
+MC(qpel, h, 32);
+MC(qpel, h, 48);
+MC(qpel, h, 64);
+
+MC(qpel, v, 4);
+MC(qpel, v, 8);
+MC(qpel, v, 12);
+MC(qpel, v, 16);
+MC(qpel, v, 24);
+MC(qpel, v, 32);
+MC(qpel, v, 48);
+MC(qpel, v, 64);
+#undef MC
+
+#endif /* AVCODEC_MIPS_HEVCDSP_MIPS_H */
diff --git a/libavcodec/mips/hevcdsp_msa.c b/libavcodec/mips/hevcdsp_msa.c
new file mode 100644
index 0000000..88e97d6
--- /dev/null
+++ b/libavcodec/mips/hevcdsp_msa.c
@@ -0,0 +1,1259 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/mips/generic_macros_msa.h"
+#include "libavcodec/mips/hevcdsp_mips.h"
+
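+/* 8-tap filtering as four dpadd_s.h steps: each vecN carries interleaved
+ * sample pairs and filtN the matching pair of taps, accumulated on top of
+ * var_in (callers pass const_vec = 128 << 6, which offsets the earlier
+ * XOR of the unsigned source bytes with 128). */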
+#define HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,                         \
+                               filt0, filt1, filt2, filt3,                     \
+                               var_in)                                         \
+( {                                                                            \
+    v8i16 out;                                                                 \
+                                                                               \
+    out = __msa_dpadd_s_h((v8i16) (var_in), (v16i8) (vec0), (v16i8) (filt0));  \
+    out = __msa_dpadd_s_h(out, (v16i8) (vec1), (v16i8) (filt1));               \
+    out = __msa_dpadd_s_h(out, (v16i8) (vec2), (v16i8) (filt2));               \
+    out = __msa_dpadd_s_h(out, (v16i8) (vec3), (v16i8) (filt3));               \
+    out;                                                                       \
+} )
+
+static void hevc_hz_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
+                              int16_t * __restrict dst, int32_t dst_stride,
+                              const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3;
+    v8u16 const_vec;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3;
+    v8i16 filter_vec;
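+    /* The 4-wide path filters two rows per pass: mask0 shuffles bytes from
+     * both source vectors handed to vshf.b, so each dstN below holds two
+     * 4-pixel output rows. */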
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
+
+    src -= 3;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    for (loop_cnt = (height >> 3); loop_cnt--;) {
+        LOAD_8VECS_SB(src, src_stride,
+                      src0, src1, src2, src3, src4, src5, src6, src7);
+        src += (8 * src_stride);
+
+        XORI_B_8VECS_SB(src0, src1, src2, src3, src4, src5, src6, src7,
+                        src0, src1, src2, src3, src4, src5, src6, src7, 128);
+
+        vec0 = __msa_vshf_b(mask0, src1, src0);
+        vec1 = __msa_vshf_b(mask1, src1, src0);
+        vec2 = __msa_vshf_b(mask2, src1, src0);
+        vec3 = __msa_vshf_b(mask3, src1, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src3, src2);
+        vec1 = __msa_vshf_b(mask1, src3, src2);
+        vec2 = __msa_vshf_b(mask2, src3, src2);
+        vec3 = __msa_vshf_b(mask3, src3, src2);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src5, src4);
+        vec1 = __msa_vshf_b(mask1, src5, src4);
+        vec2 = __msa_vshf_b(mask2, src5, src4);
+        vec3 = __msa_vshf_b(mask3, src5, src4);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src7, src6);
+        vec1 = __msa_vshf_b(mask1, src7, src6);
+        vec2 = __msa_vshf_b(mask2, src7, src6);
+        vec3 = __msa_vshf_b(mask3, src7, src6);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        out0 = __msa_copy_u_d((v2i64) dst0, 0);
+        out1 = __msa_copy_u_d((v2i64) dst0, 1);
+        out2 = __msa_copy_u_d((v2i64) dst1, 0);
+        out3 = __msa_copy_u_d((v2i64) dst1, 1);
+        out4 = __msa_copy_u_d((v2i64) dst2, 0);
+        out5 = __msa_copy_u_d((v2i64) dst2, 1);
+        out6 = __msa_copy_u_d((v2i64) dst3, 0);
+        out7 = __msa_copy_u_d((v2i64) dst3, 1);
+
+        STORE_DWORD(dst, out0);
+        dst += dst_stride;
+        STORE_DWORD(dst, out1);
+        dst += dst_stride;
+        STORE_DWORD(dst, out2);
+        dst += dst_stride;
+        STORE_DWORD(dst, out3);
+        dst += dst_stride;
+        STORE_DWORD(dst, out4);
+        dst += dst_stride;
+        STORE_DWORD(dst, out5);
+        dst += dst_stride;
+        STORE_DWORD(dst, out6);
+        dst += dst_stride;
+        STORE_DWORD(dst, out7);
+        dst += dst_stride;
+    }
+}
+
+static void hevc_hz_8t_8w_msa(uint8_t * __restrict src, int32_t src_stride,
+                              int16_t * __restrict dst, int32_t dst_stride,
+                              const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3;
+    v8u16 const_vec;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3;
+    v8i16 filter_vec;
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+    src -= 3;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src0, src0);
+        vec2 = __msa_vshf_b(mask2, src0, src0);
+        vec3 = __msa_vshf_b(mask3, src0, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src1, src1);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        vec3 = __msa_vshf_b(mask3, src1, src1);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src2, src2);
+        vec1 = __msa_vshf_b(mask1, src2, src2);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec3 = __msa_vshf_b(mask3, src2, src2);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src3, src3);
+        vec1 = __msa_vshf_b(mask1, src3, src3);
+        vec2 = __msa_vshf_b(mask2, src3, src3);
+        vec3 = __msa_vshf_b(mask3, src3, src3);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0, dst);
+        dst += dst_stride;
+        STORE_SH(dst1, dst);
+        dst += dst_stride;
+        STORE_SH(dst2, dst);
+        dst += dst_stride;
+        STORE_SH(dst3, dst);
+        dst += dst_stride;
+    }
+}
+
+static void hevc_hz_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    hevc_hz_8t_8w_msa(src, src_stride, dst, dst_stride, filter, height);
+
+    hevc_hz_8t_4w_msa(src + 8, src_stride, dst + 8, dst_stride, filter, height);
+}
+
+static void hevc_hz_8t_16w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3;
+    v8u16 const_vec;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+    v8i16 filter_vec;
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+    src -= 3;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src0, src2, src4, src6);
+        LOAD_4VECS_SB(src + 8, src_stride, src1, src3, src5, src7);
+        src += (4 * src_stride);
+
+        XORI_B_8VECS_SB(src0, src1, src2, src3, src4, src5, src6, src7,
+                        src0, src1, src2, src3, src4, src5, src6, src7, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src0, src0);
+        vec2 = __msa_vshf_b(mask2, src0, src0);
+        vec3 = __msa_vshf_b(mask3, src0, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src1, src1);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        vec3 = __msa_vshf_b(mask3, src1, src1);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src2, src2);
+        vec1 = __msa_vshf_b(mask1, src2, src2);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec3 = __msa_vshf_b(mask3, src2, src2);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src3, src3);
+        vec1 = __msa_vshf_b(mask1, src3, src3);
+        vec2 = __msa_vshf_b(mask2, src3, src3);
+        vec3 = __msa_vshf_b(mask3, src3, src3);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src4, src4);
+        vec1 = __msa_vshf_b(mask1, src4, src4);
+        vec2 = __msa_vshf_b(mask2, src4, src4);
+        vec3 = __msa_vshf_b(mask3, src4, src4);
+
+        dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src5, src5);
+        vec1 = __msa_vshf_b(mask1, src5, src5);
+        vec2 = __msa_vshf_b(mask2, src5, src5);
+        vec3 = __msa_vshf_b(mask3, src5, src5);
+
+        dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src6, src6);
+        vec1 = __msa_vshf_b(mask1, src6, src6);
+        vec2 = __msa_vshf_b(mask2, src6, src6);
+        vec3 = __msa_vshf_b(mask3, src6, src6);
+
+        dst6 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src7, src7);
+        vec1 = __msa_vshf_b(mask1, src7, src7);
+        vec2 = __msa_vshf_b(mask2, src7, src7);
+        vec3 = __msa_vshf_b(mask3, src7, src7);
+
+        dst7 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0, dst);
+        STORE_SH(dst1, dst + 8);
+        dst += dst_stride;
+        STORE_SH(dst2, dst);
+        STORE_SH(dst3, dst + 8);
+        dst += dst_stride;
+        STORE_SH(dst4, dst);
+        STORE_SH(dst5, dst + 8);
+        dst += dst_stride;
+        STORE_SH(dst6, dst);
+        STORE_SH(dst7, dst + 8);
+        dst += dst_stride;
+    }
+}
+
+static void hevc_hz_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
+    v8i16 filter_vec;
+    v8u16 const_vec;
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+    src -= 3;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+    mask4 = mask0 + 8;
+    mask5 = mask0 + 10;
+    mask6 = mask0 + 12;
+    mask7 = mask0 + 14;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    for (loop_cnt = (height >> 1); loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src1 = LOAD_SB(src + 16);
+        src += src_stride;
+        src2 = LOAD_SB(src);
+        src3 = LOAD_SB(src + 16);
+        src += src_stride;
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src0, src0);
+        vec2 = __msa_vshf_b(mask2, src0, src0);
+        vec3 = __msa_vshf_b(mask3, src0, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask4, src1, src0);
+        vec1 = __msa_vshf_b(mask5, src1, src0);
+        vec2 = __msa_vshf_b(mask6, src1, src0);
+        vec3 = __msa_vshf_b(mask7, src1, src0);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src1, src1);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        vec3 = __msa_vshf_b(mask3, src1, src1);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src2, src2);
+        vec1 = __msa_vshf_b(mask1, src2, src2);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec3 = __msa_vshf_b(mask3, src2, src2);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask4, src3, src2);
+        vec1 = __msa_vshf_b(mask5, src3, src2);
+        vec2 = __msa_vshf_b(mask6, src3, src2);
+        vec3 = __msa_vshf_b(mask7, src3, src2);
+
+        dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src3, src3);
+        vec1 = __msa_vshf_b(mask1, src3, src3);
+        vec2 = __msa_vshf_b(mask2, src3, src3);
+        vec3 = __msa_vshf_b(mask3, src3, src3);
+
+        dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0, dst);
+        STORE_SH(dst1, dst + 8);
+        STORE_SH(dst2, dst + 16);
+        dst += dst_stride;
+        STORE_SH(dst3, dst);
+        STORE_SH(dst4, dst + 8);
+        STORE_SH(dst5, dst + 16);
+        dst += dst_stride;
+    }
+}
+
+static void hevc_hz_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3;
+    v8i16 filter_vec;
+    v8u16 const_vec;
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+    src -= 3;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+    mask4 = mask0 + 8;
+    mask5 = mask0 + 10;
+    mask6 = mask0 + 12;
+    mask7 = mask0 + 14;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    for (loop_cnt = height; loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src1 = LOAD_SB(src + 16);
+        src2 = LOAD_SB(src + 24);
+        src += src_stride;
+
+        XORI_B_3VECS_SB(src0, src1, src2, src0, src1, src2, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src0, src0);
+        vec2 = __msa_vshf_b(mask2, src0, src0);
+        vec3 = __msa_vshf_b(mask3, src0, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask4, src1, src0);
+        vec1 = __msa_vshf_b(mask5, src1, src0);
+        vec2 = __msa_vshf_b(mask6, src1, src0);
+        vec3 = __msa_vshf_b(mask7, src1, src0);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src1, src1);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        vec3 = __msa_vshf_b(mask3, src1, src1);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src2, src2);
+        vec1 = __msa_vshf_b(mask1, src2, src2);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec3 = __msa_vshf_b(mask3, src2, src2);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0, dst);
+        STORE_SH(dst1, dst + 8);
+        STORE_SH(dst2, dst + 16);
+        STORE_SH(dst3, dst + 24);
+        dst += dst_stride;
+    }
+}
+
+static void hevc_hz_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
+    v8i16 filter_vec;
+    v8u16 const_vec;
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+    src -= 3;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+    mask4 = mask0 + 8;
+    mask5 = mask0 + 10;
+    mask6 = mask0 + 12;
+    mask7 = mask0 + 14;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    for (loop_cnt = height; loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src1 = LOAD_SB(src + 16);
+        src2 = LOAD_SB(src + 32);
+        src3 = LOAD_SB(src + 40);
+        src += src_stride;
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src0, src0);
+        vec2 = __msa_vshf_b(mask2, src0, src0);
+        vec3 = __msa_vshf_b(mask3, src0, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask4, src1, src0);
+        vec1 = __msa_vshf_b(mask5, src1, src0);
+        vec2 = __msa_vshf_b(mask6, src1, src0);
+        vec3 = __msa_vshf_b(mask7, src1, src0);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src1, src1);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        vec3 = __msa_vshf_b(mask3, src1, src1);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask4, src2, src1);
+        vec1 = __msa_vshf_b(mask5, src2, src1);
+        vec2 = __msa_vshf_b(mask6, src2, src1);
+        vec3 = __msa_vshf_b(mask7, src2, src1);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src2, src2);
+        vec1 = __msa_vshf_b(mask1, src2, src2);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec3 = __msa_vshf_b(mask3, src2, src2);
+
+        dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        vec0 = __msa_vshf_b(mask0, src3, src3);
+        vec1 = __msa_vshf_b(mask1, src3, src3);
+        vec2 = __msa_vshf_b(mask2, src3, src3);
+        vec3 = __msa_vshf_b(mask3, src3, src3);
+
+        dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0, dst);
+        STORE_SH(dst1, dst + 8);
+        STORE_SH(dst2, dst + 16);
+        STORE_SH(dst3, dst + 24);
+        STORE_SH(dst4, dst + 32);
+        STORE_SH(dst5, dst + 40);
+        dst += dst_stride;
+    }
+}
+
+static void hevc_hz_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3, src4;
+    v8i16 filt0, filt1, filt2, filt3;
+    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+    v16i8 vec0, vec1, vec2, vec3;
+    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+    v8i16 filter_vec;
+    v8u16 const_vec;
+    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
+
+    src -= 3;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+    mask4 = mask0 + 8;
+    mask5 = mask0 + 10;
+    mask6 = mask0 + 12;
+    mask7 = mask0 + 14;
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    for (loop_cnt = height; loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src1 = LOAD_SB(src + 16);
+        src2 = LOAD_SB(src + 32);
+        src3 = LOAD_SB(src + 48);
+        src4 = LOAD_SB(src + 56);
+        src += src_stride;
+
+        XORI_B_5VECS_SB(src0, src1, src2, src3, src4,
+                        src0, src1, src2, src3, src4, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src0, src0);
+        vec2 = __msa_vshf_b(mask2, src0, src0);
+        vec3 = __msa_vshf_b(mask3, src0, src0);
+
+        dst0 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0, dst);
+
+        vec0 = __msa_vshf_b(mask4, src1, src0);
+        vec1 = __msa_vshf_b(mask5, src1, src0);
+        vec2 = __msa_vshf_b(mask6, src1, src0);
+        vec3 = __msa_vshf_b(mask7, src1, src0);
+
+        dst1 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst1, dst + 8);
+
+        vec0 = __msa_vshf_b(mask0, src1, src1);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        vec3 = __msa_vshf_b(mask3, src1, src1);
+
+        dst2 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst2, dst + 16);
+
+        vec0 = __msa_vshf_b(mask4, src2, src1);
+        vec1 = __msa_vshf_b(mask5, src2, src1);
+        vec2 = __msa_vshf_b(mask6, src2, src1);
+        vec3 = __msa_vshf_b(mask7, src2, src1);
+
+        dst3 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst3, dst + 24);
+
+        vec0 = __msa_vshf_b(mask0, src2, src2);
+        vec1 = __msa_vshf_b(mask1, src2, src2);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec3 = __msa_vshf_b(mask3, src2, src2);
+
+        dst4 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst4, dst + 32);
+
+        vec0 = __msa_vshf_b(mask4, src3, src2);
+        vec1 = __msa_vshf_b(mask5, src3, src2);
+        vec2 = __msa_vshf_b(mask6, src3, src2);
+        vec3 = __msa_vshf_b(mask7, src3, src2);
+
+        dst5 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst5, dst + 40);
+
+        vec0 = __msa_vshf_b(mask0, src3, src3);
+        vec1 = __msa_vshf_b(mask1, src3, src3);
+        vec2 = __msa_vshf_b(mask2, src3, src3);
+        vec3 = __msa_vshf_b(mask3, src3, src3);
+
+        dst6 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst6, dst + 48);
+
+        vec0 = __msa_vshf_b(mask0, src4, src4);
+        vec1 = __msa_vshf_b(mask1, src4, src4);
+        vec2 = __msa_vshf_b(mask2, src4, src4);
+        vec3 = __msa_vshf_b(mask3, src4, src4);
+
+        dst7 = HEVC_FILT_8TAP_DPADD_H(vec0, vec1, vec2, vec3,
+                                      filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst7, dst + 56);
+
+        dst += dst_stride;
+    }
+}
+
+static void hevc_vt_8t_4w_msa(uint8_t * __restrict src, int32_t src_stride,
+                              int16_t * __restrict dst, int32_t dst_stride,
+                              const int8_t * __restrict filter, int32_t height)
+{
+    int32_t loop_cnt;
+    uint64_t out0, out1, out2, out3;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+    v16i8 src9, src10, src11, src12, src13, src14;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v16i8 src1110_r, src1211_r, src1312_r, src1413_r;
+    v16i8 src2110, src4332, src6554, src8776, src10998;
+    v16i8 src12111110, src14131312;
+    v8i16 dst10, dst32, dst54, dst76;
+    v8i16 filter_vec;
+    v8i16 filt0, filt1, filt2, filt3;
+    v8u16 const_vec;
+
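+    /* Vertical 8-tap, 4-wide: srcNM_r is rows N and M interleaved (right
+     * halves), and src2110 etc. pack two such row pairs per vector, so the
+     * 8-tap chain produces two output rows at a time. */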
+    src -= (3 * src_stride);
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    LOAD_7VECS_SB(src, src_stride,
+                  src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+    ILVR_D_3VECS_SB(src2110, src21_r, src10_r, src4332, src43_r, src32_r,
+                    src6554, src65_r, src54_r);
+
+    XORI_B_3VECS_SB(src2110, src4332, src6554, src2110, src4332, src6554, 128);
+
+    for (loop_cnt = (height >> 3); loop_cnt--;) {
+        LOAD_8VECS_SB(src, src_stride,
+                      src7, src8, src9, src10, src11, src12, src13, src14);
+        src += (8 * src_stride);
+
+        ILVR_B_8VECS_SB(src6, src7, src8, src9, src10, src11, src12, src13,
+                        src7, src8, src9, src10, src11, src12, src13, src14,
+                        src76_r, src87_r, src98_r, src109_r,
+                        src1110_r, src1211_r, src1312_r, src1413_r);
+
+        ILVR_D_4VECS_SB(src8776, src87_r, src76_r, src10998, src109_r, src98_r,
+                        src12111110, src1211_r, src1110_r,
+                        src14131312, src1413_r, src1312_r);
+
+        XORI_B_4VECS_SB(src8776, src10998, src12111110, src14131312,
+                        src8776, src10998, src12111110, src14131312, 128);
+
+        dst10 = HEVC_FILT_8TAP_DPADD_H(src2110, src4332, src6554, src8776,
+                                       filt0, filt1, filt2, filt3, const_vec);
+
+        dst32 = HEVC_FILT_8TAP_DPADD_H(src4332, src6554, src8776, src10998,
+                                       filt0, filt1, filt2, filt3, const_vec);
+
+        dst54 = HEVC_FILT_8TAP_DPADD_H(src6554, src8776, src10998, src12111110,
+                                       filt0, filt1, filt2, filt3, const_vec);
+
+        dst76 = HEVC_FILT_8TAP_DPADD_H(src8776, src10998,
+                                       src12111110, src14131312,
+                                       filt0, filt1, filt2, filt3, const_vec);
+
+        out0 = __msa_copy_u_d((v2i64) dst10, 0);
+        out1 = __msa_copy_u_d((v2i64) dst10, 1);
+        out2 = __msa_copy_u_d((v2i64) dst32, 0);
+        out3 = __msa_copy_u_d((v2i64) dst32, 1);
+
+        STORE_DWORD(dst, out0);
+        dst += dst_stride;
+        STORE_DWORD(dst, out1);
+        dst += dst_stride;
+        STORE_DWORD(dst, out2);
+        dst += dst_stride;
+        STORE_DWORD(dst, out3);
+        dst += dst_stride;
+
+        out0 = __msa_copy_u_d((v2i64) dst54, 0);
+        out1 = __msa_copy_u_d((v2i64) dst54, 1);
+        out2 = __msa_copy_u_d((v2i64) dst76, 0);
+        out3 = __msa_copy_u_d((v2i64) dst76, 1);
+
+        STORE_DWORD(dst, out0);
+        dst += dst_stride;
+        STORE_DWORD(dst, out1);
+        dst += dst_stride;
+        STORE_DWORD(dst, out2);
+        dst += dst_stride;
+        STORE_DWORD(dst, out3);
+        dst += dst_stride;
+
+        src2110 = src10998;
+        src4332 = src12111110;
+        src6554 = src14131312;
+
+        src6 = src14;
+    }
+}
+
+static void hevc_vt_8t_8w_msa(uint8_t * __restrict src, int32_t src_stride,
+                              int16_t * __restrict dst, int32_t dst_stride,
+                              const int8_t * __restrict filter, int32_t height)
+{
+    int32_t loop_cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
+    v8i16 filter_vec;
+    v8i16 filt0, filt1, filt2, filt3;
+    v8u16 const_vec;
+
+    src -= (3 * src_stride);
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    LOAD_7VECS_SB(src, src_stride,
+                  src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+                    src0, src1, src2, src3, src4, src5, src6, 128);
+
+    ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
+
+        ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_r, src87_r, src98_r, src109_r);
+
+        dst0_r = HEVC_FILT_8TAP_DPADD_H(src10_r, src32_r, src54_r, src76_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst1_r = HEVC_FILT_8TAP_DPADD_H(src21_r, src43_r, src65_r, src87_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst2_r = HEVC_FILT_8TAP_DPADD_H(src32_r, src54_r, src76_r, src98_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst3_r = HEVC_FILT_8TAP_DPADD_H(src43_r, src65_r, src87_r, src109_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        STORE_SH(dst0_r, dst);
+        dst += dst_stride;
+        STORE_SH(dst1_r, dst);
+        dst += dst_stride;
+        STORE_SH(dst2_r, dst);
+        dst += dst_stride;
+        STORE_SH(dst3_r, dst);
+        dst += dst_stride;
+
+        src10_r = src54_r;
+        src32_r = src76_r;
+        src54_r = src98_r;
+
+        src21_r = src65_r;
+        src43_r = src87_r;
+        src65_r = src109_r;
+
+        src6 = src10;
+    }
+}
+
+static void hevc_vt_8t_12w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    int32_t loop_cnt;
+    uint64_t out0, out1, out2, out3;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
+    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
+    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
+    v16i8 src2110, src4332, src6554, src8776, src10998;
+    v8i16 dst0_l, dst1_l;
+    v8i16 filter_vec;
+    v8i16 filt0, filt1, filt2, filt3;
+    v8u16 const_vec;
+
+    src -= (3 * src_stride);
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    LOAD_7VECS_SB(src, src_stride,
+                  src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+                    src0, src1, src2, src3, src4, src5, src6, 128);
+
+    ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+    ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
+
+    ILVR_D_3VECS_SB(src2110, src21_l, src10_l, src4332, src43_l, src32_l,
+                    src6554, src65_l, src54_l);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
+
+        ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_r, src87_r, src98_r, src109_r);
+
+        ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_l, src87_l, src98_l, src109_l);
+
+        ILVR_D_2VECS_SB(src8776, src87_l, src76_l, src10998, src109_l, src98_l);
+
+        dst0_r = HEVC_FILT_8TAP_DPADD_H(src10_r, src32_r, src54_r, src76_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst1_r = HEVC_FILT_8TAP_DPADD_H(src21_r, src43_r, src65_r, src87_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst2_r = HEVC_FILT_8TAP_DPADD_H(src32_r, src54_r, src76_r, src98_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst3_r = HEVC_FILT_8TAP_DPADD_H(src43_r, src65_r, src87_r, src109_r,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst0_l = HEVC_FILT_8TAP_DPADD_H(src2110, src4332, src6554, src8776,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        dst1_l = HEVC_FILT_8TAP_DPADD_H(src4332, src6554, src8776, src10998,
+                                        filt0, filt1, filt2, filt3, const_vec);
+
+        out0 = __msa_copy_u_d((v2i64) dst0_l, 0);
+        out1 = __msa_copy_u_d((v2i64) dst0_l, 1);
+        out2 = __msa_copy_u_d((v2i64) dst1_l, 0);
+        out3 = __msa_copy_u_d((v2i64) dst1_l, 1);
+
+        STORE_SH(dst0_r, dst);
+        STORE_DWORD(dst + 8, out0);
+        dst += dst_stride;
+        STORE_SH(dst1_r, dst);
+        STORE_DWORD(dst + 8, out1);
+        dst += dst_stride;
+
+        STORE_SH(dst2_r, dst);
+        STORE_DWORD(dst + 8, out2);
+        dst += dst_stride;
+        STORE_SH(dst3_r, dst);
+        STORE_DWORD(dst + 8, out3);
+        dst += dst_stride;
+
+        src10_r = src54_r;
+        src32_r = src76_r;
+        src54_r = src98_r;
+
+        src21_r = src65_r;
+        src43_r = src87_r;
+        src65_r = src109_r;
+
+        src2110 = src6554;
+        src4332 = src8776;
+        src6554 = src10998;
+
+        src6 = src10;
+    }
+}
+
+static void hevc_vt_8t_16multx4mult_msa(uint8_t * __restrict src,
+                                        int32_t src_stride,
+                                        int16_t * __restrict dst,
+                                        int32_t dst_stride,
+                                        const int8_t * __restrict filter,
+                                        int32_t height,
+                                        int32_t width)
+{
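+    /* Generic vertical 8-tap core: walks the block in 16-column stripes
+     * (width is expected to be a multiple of 16) and produces four output
+     * rows per inner iteration. */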
+    uint8_t *src_tmp;
+    int16_t *dst_tmp;
+    int32_t loop_cnt, cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
+    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
+    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
+    v8i16 dst0_l, dst1_l, dst2_l, dst3_l;
+    v8i16 filter_vec;
+    v8i16 filt0, filt1, filt2, filt3;
+    v8u16 const_vec;
+
+    src -= (3 * src_stride);
+
+    const_vec = (v8u16) __msa_ldi_h(128);
+    const_vec <<= 6;
+
+    filter_vec = LOAD_SH(filter);
+    filt0 = __msa_splati_h(filter_vec, 0);
+    filt1 = __msa_splati_h(filter_vec, 1);
+    filt2 = __msa_splati_h(filter_vec, 2);
+    filt3 = __msa_splati_h(filter_vec, 3);
+
+    for (cnt = width >> 4; cnt--;) {
+        src_tmp = src;
+        dst_tmp = dst;
+
+        LOAD_7VECS_SB(src_tmp, src_stride,
+                      src0, src1, src2, src3, src4, src5, src6);
+        src_tmp += (7 * src_stride);
+
+        XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+                        src0, src1, src2, src3, src4, src5, src6, 128);
+
+        ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                        src1, src3, src5, src2, src4, src6,
+                        src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+        ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                        src1, src3, src5, src2, src4, src6,
+                        src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
+
+        for (loop_cnt = (height >> 2); loop_cnt--;) {
+            LOAD_4VECS_SB(src_tmp, src_stride, src7, src8, src9, src10);
+            src_tmp += (4 * src_stride);
+
+            XORI_B_4VECS_SB(src7, src8, src9, src10,
+                            src7, src8, src9, src10, 128);
+
+            ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                            src76_r, src87_r, src98_r, src109_r);
+
+            ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                            src76_l, src87_l, src98_l, src109_l);
+
+            dst0_r = HEVC_FILT_8TAP_DPADD_H(src10_r, src32_r, src54_r, src76_r,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst1_r = HEVC_FILT_8TAP_DPADD_H(src21_r, src43_r, src65_r, src87_r,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst2_r = HEVC_FILT_8TAP_DPADD_H(src32_r, src54_r, src76_r, src98_r,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst3_r = HEVC_FILT_8TAP_DPADD_H(src43_r, src65_r, src87_r, src109_r,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst0_l = HEVC_FILT_8TAP_DPADD_H(src10_l, src32_l, src54_l, src76_l,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst1_l = HEVC_FILT_8TAP_DPADD_H(src21_l, src43_l, src65_l, src87_l,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst2_l = HEVC_FILT_8TAP_DPADD_H(src32_l, src54_l, src76_l, src98_l,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            dst3_l = HEVC_FILT_8TAP_DPADD_H(src43_l, src65_l, src87_l, src109_l,
+                                            filt0, filt1, filt2, filt3,
+                                            const_vec);
+
+            STORE_SH(dst0_r, dst_tmp);
+            STORE_SH(dst0_l, dst_tmp + 8);
+            dst_tmp += dst_stride;
+            STORE_SH(dst1_r, dst_tmp);
+            STORE_SH(dst1_l, dst_tmp + 8);
+            dst_tmp += dst_stride;
+
+            STORE_SH(dst2_r, dst_tmp);
+            STORE_SH(dst2_l, dst_tmp + 8);
+            dst_tmp += dst_stride;
+            STORE_SH(dst3_r, dst_tmp);
+            STORE_SH(dst3_l, dst_tmp + 8);
+            dst_tmp += dst_stride;
+
+            src10_r = src54_r;
+            src32_r = src76_r;
+            src54_r = src98_r;
+
+            src21_r = src65_r;
+            src43_r = src87_r;
+            src65_r = src109_r;
+
+            src10_l = src54_l;
+            src32_l = src76_l;
+            src54_l = src98_l;
+
+            src21_l = src65_l;
+            src43_l = src87_l;
+            src65_l = src109_l;
+
+            src6 = src10;
+        }
+
+        src += 16;
+        dst += 16;
+    }
+}
+
+static void hevc_vt_8t_16w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
+                                filter, height, 16);
+}
+
+static void hevc_vt_8t_24w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
+                                filter, height, 16);
+
+    hevc_vt_8t_8w_msa(src + 16, src_stride, dst + 16, dst_stride,
+                      filter, height);
+}
+
+static void hevc_vt_8t_32w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
+                                filter, height, 32);
+}
+
+static void hevc_vt_8t_48w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
+                                filter, height, 48);
+}
+
+static void hevc_vt_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
+                               int16_t * __restrict dst, int32_t dst_stride,
+                               const int8_t * __restrict filter, int32_t height)
+{
+    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
+                                filter, height, 64);
+}
+
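+/* MC(qpel, h, 4, 8, hz, mx), for example, expands to
+ * ff_hevc_put_hevc_qpel_h4_8_msa(), which looks up the 8-tap filter in
+ * ff_hevc_qpel_filters[mx - 1] and runs hevc_hz_8t_4w_msa() with
+ * MAX_PB_SIZE as the stride of the intermediate dst buffer. */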
+#define MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR)                            \
+void ff_hevc_put_hevc_##PEL##_##DIR####WIDTH##_8_msa(int16_t *dst,          \
+                                                     uint8_t *src,          \
+                                                     ptrdiff_t src_stride,  \
+                                                     int height,            \
+                                                     intptr_t mx,           \
+                                                     intptr_t my,           \
+                                                     int width)             \
+{                                                                           \
+    const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR - 1];           \
+                                                                            \
+    hevc_##DIR1##_##TAP##t_##WIDTH##w_msa(src, src_stride, dst,             \
+                                          MAX_PB_SIZE, filter, height);     \
+}
+
+MC(qpel, h, 4, 8, hz, mx);
+MC(qpel, h, 8, 8, hz, mx);
+MC(qpel, h, 12, 8, hz, mx);
+MC(qpel, h, 16, 8, hz, mx);
+MC(qpel, h, 24, 8, hz, mx);
+MC(qpel, h, 32, 8, hz, mx);
+MC(qpel, h, 48, 8, hz, mx);
+MC(qpel, h, 64, 8, hz, mx);
+
+MC(qpel, v, 4, 8, vt, my);
+MC(qpel, v, 8, 8, vt, my);
+MC(qpel, v, 12, 8, vt, my);
+MC(qpel, v, 16, 8, vt, my);
+MC(qpel, v, 24, 8, vt, my);
+MC(qpel, v, 32, 8, vt, my);
+MC(qpel, v, 48, 8, vt, my);
+MC(qpel, v, 64, 8, vt, my);
+
+#undef MC
diff --git a/libavutil/mips/generic_macros_msa.h b/libavutil/mips/generic_macros_msa.h
new file mode 100644
index 0000000..d2fd87c
--- /dev/null
+++ b/libavutil/mips/generic_macros_msa.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale at imgtec.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MIPS_GENERIC_MACROS_MSA_H
+#define AVUTIL_MIPS_GENERIC_MACROS_MSA_H
+
+#include <stdint.h>
+#include <msa.h>
+
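+/* Basic vector memory helpers: LOAD_UB/LOAD_SB/LOAD_SH read one 16-byte MSA
+ * vector from psrc, STORE_SH writes one v8i16 vector to pdest. */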
+#define LOAD_UB(psrc)             \
+( {                               \
+    v16u8 out_m;                  \
+    out_m = *((v16u8 *) (psrc));  \
+    out_m;                        \
+} )
+
+#define LOAD_SB(psrc)             \
+( {                               \
+    v16i8 out_m;                  \
+    out_m = *((v16i8 *) (psrc));  \
+    out_m;                        \
+} )
+
+#define LOAD_SH(psrc)             \
+( {                               \
+    v8i16 out_m;                  \
+    out_m = *((v8i16 *) (psrc));  \
+    out_m;                        \
+} )
+
+#define STORE_SH(vec, pdest)       \
+{                                  \
+    *((v8i16 *) (pdest)) = (vec);  \
+}
+
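+/* Store a 64-bit value: a single sd on MIPS r6, two unaligned word
+ * stores (usw) on earlier ISAs, with the low word written at the lower
+ * address. */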
+#if (__mips_isa_rev >= 6)
+    #define STORE_DWORD(pdst, val)                \
+    {                                             \
+        uint8_t *dst_ptr_m = (uint8_t *) (pdst);  \
+        uint64_t val_m = (val);                   \
+                                                  \
+        __asm__ __volatile__ (                    \
+            "sd  %[val_m],  %[dst_ptr_m]  \n\t"   \
+                                                  \
+            : [dst_ptr_m] "=m" (*dst_ptr_m)       \
+            : [val_m] "r" (val_m)                 \
+        );                                        \
+    }
+#else
+    #define STORE_DWORD(pdst, val)                                 \
+    {                                                              \
+        uint8_t *dst1_m = (uint8_t *) (pdst);                      \
+        uint8_t *dst2_m = ((uint8_t *) (pdst)) + 4;                \
+        uint32_t val0_m, val1_m;                                   \
+                                                                   \
+        val0_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);          \
+        val1_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF);  \
+                                                                   \
+        __asm__ __volatile__ (                                     \
+            "usw  %[val0_m],  %[dst1_m]  \n\t"                     \
+            "usw  %[val1_m],  %[dst2_m]  \n\t"                     \
+                                                                   \
+            : [dst1_m] "=m" (*dst1_m), [dst2_m] "=m" (*dst2_m)     \
+            : [val0_m] "r" (val0_m), [val1_m] "r" (val1_m)         \
+        );                                                         \
+    }
+#endif
+
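+/* Load 4, 7 or 8 rows of signed-byte vectors, spaced 'stride' bytes
+ * apart, in one go. */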
+#define LOAD_4VECS_SB(psrc, stride,            \
+                      val0, val1, val2, val3)  \
+{                                              \
+    val0 = LOAD_SB(psrc + 0 * stride);         \
+    val1 = LOAD_SB(psrc + 1 * stride);         \
+    val2 = LOAD_SB(psrc + 2 * stride);         \
+    val3 = LOAD_SB(psrc + 3 * stride);         \
+}
+
+#define LOAD_7VECS_SB(psrc, stride,            \
+                      val0, val1, val2, val3,  \
+                      val4, val5, val6)        \
+{                                              \
+    val0 = LOAD_SB((psrc) + 0 * (stride));     \
+    val1 = LOAD_SB((psrc) + 1 * (stride));     \
+    val2 = LOAD_SB((psrc) + 2 * (stride));     \
+    val3 = LOAD_SB((psrc) + 3 * (stride));     \
+    val4 = LOAD_SB((psrc) + 4 * (stride));     \
+    val5 = LOAD_SB((psrc) + 5 * (stride));     \
+    val6 = LOAD_SB((psrc) + 6 * (stride));     \
+}
+
+#define LOAD_8VECS_SB(psrc, stride,                 \
+                      out0, out1, out2, out3,       \
+                      out4, out5, out6, out7)       \
+{                                                   \
+    LOAD_4VECS_SB((psrc), (stride),                 \
+                  (out0), (out1), (out2), (out3));  \
+    LOAD_4VECS_SB((psrc + 4 * stride), (stride),    \
+                  (out4), (out5), (out6), (out7));  \
+}
+
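+/* ILVR_B_*: interleave the right (least significant) halves of each
+ * (in*_r, in*_l) byte-vector pair via __msa_ilvr_b. */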
+#define ILVR_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,         \
+                        out0, out1)                         \
+{                                                           \
+    out0 = __msa_ilvr_b((v16i8) (in0_l), (v16i8) (in0_r));  \
+    out1 = __msa_ilvr_b((v16i8) (in1_l), (v16i8) (in1_r));  \
+}
+
+#define ILVR_B_4VECS_SB(in0_r, in1_r, in2_r, in3_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        out0, out1, out2, out3)      \
+{                                                    \
+    ILVR_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVR_B_2VECS_SB(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+}
+
+#define ILVR_B_6VECS_SB(in0_r, in1_r, in2_r,     \
+                        in3_r, in4_r, in5_r,     \
+                        in0_l, in1_l, in2_l,     \
+                        in3_l, in4_l, in5_l,     \
+                        out0, out1, out2,        \
+                        out3, out4, out5)        \
+{                                                \
+    ILVR_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,  \
+                    out0, out1);                 \
+    ILVR_B_2VECS_SB(in2_r, in3_r, in2_l, in3_l,  \
+                    out2, out3);                 \
+    ILVR_B_2VECS_SB(in4_r, in5_r, in4_l, in5_l,  \
+                    out4, out5);                 \
+}
+
+#define ILVR_B_8VECS_SB(in0_r, in1_r, in2_r, in3_r,  \
+                        in4_r, in5_r, in6_r, in7_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        in4_l, in5_l, in6_l, in7_l,  \
+                        out0, out1, out2, out3,      \
+                        out4, out5, out6, out7)      \
+{                                                    \
+    ILVR_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVR_B_2VECS_SB(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+    ILVR_B_2VECS_SB(in4_r, in5_r, in4_l, in5_l,      \
+                    out4, out5);                     \
+    ILVR_B_2VECS_SB(in6_r, in7_r, in6_l, in7_l,      \
+                    out6, out7);                     \
+}
+
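+/* ILVL_B_*: same pairing as above, but interleaving the left (most
+ * significant) halves via __msa_ilvl_b. */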
+#define ILVL_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,         \
+                        out0, out1)                         \
+{                                                           \
+    out0 = __msa_ilvl_b((v16i8) (in0_l), (v16i8) (in0_r));  \
+    out1 = __msa_ilvl_b((v16i8) (in1_l), (v16i8) (in1_r));  \
+}
+
+#define ILVL_B_4VECS_SB(in0_r, in1_r, in2_r, in3_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        out0, out1, out2, out3)      \
+{                                                    \
+    ILVL_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVL_B_2VECS_SB(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+}
+
+#define ILVL_B_6VECS_SB(in0_r, in1_r, in2_r,     \
+                        in3_r, in4_r, in5_r,     \
+                        in0_l, in1_l, in2_l,     \
+                        in3_l, in4_l, in5_l,     \
+                        out0, out1, out2,        \
+                        out3, out4, out5)        \
+{                                                \
+    ILVL_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,  \
+                    out0, out1);                 \
+    ILVL_B_2VECS_SB(in2_r, in3_r, in2_l, in3_l,  \
+                    out2, out3);                 \
+    ILVL_B_2VECS_SB(in4_r, in5_r, in4_l, in5_l,  \
+                    out4, out5);                 \
+}
+
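+/* ILVR_D_*: pack the low doublewords of the (in*_r, in*_l) pairs into
+ * single vectors via __msa_ilvr_d. */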
+#define ILVR_D_2VECS_SB(out0, in0_l, in0_r,                         \
+                        out1, in1_l, in1_r)                         \
+{                                                                   \
+    out0 = (v16i8) __msa_ilvr_d((v2i64) (in0_l), (v2i64) (in0_r));  \
+    out1 = (v16i8) __msa_ilvr_d((v2i64) (in1_l), (v2i64) (in1_r));  \
+}
+
+#define ILVR_D_3VECS_SB(out0, in0_l, in0_r,                         \
+                        out1, in1_l, in1_r,                         \
+                        out2, in2_l, in2_r)                         \
+{                                                                   \
+    ILVR_D_2VECS_SB(out0, in0_l, in0_r,                             \
+                    out1, in1_l, in1_r);                            \
+    out2 = (v16i8) __msa_ilvr_d((v2i64) (in2_l), (v2i64) (in2_r));  \
+}
+
+#define ILVR_D_4VECS_SB(out0, in0_l, in0_r,  \
+                        out1, in1_l, in1_r,  \
+                        out2, in2_l, in2_r,  \
+                        out3, in3_l, in3_r)  \
+{                                            \
+    ILVR_D_2VECS_SB(out0, in0_l, in0_r,      \
+                    out1, in1_l, in1_r);     \
+    ILVR_D_2VECS_SB(out2, in2_l, in2_r,      \
+                    out3, in3_l, in3_r);     \
+}
+
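+/* XORI_B_*: xori.b every byte of the input vectors with an immediate;
+ * typically used with 128 to re-bias unsigned pixels into the signed
+ * byte range ahead of the filtering dot products. */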
+#define XORI_B_2VECS_SB(val0, val1,                          \
+                        out0, out1, xor_val)                 \
+{                                                            \
+    out0 = (v16i8) __msa_xori_b((v16u8) (val0), (xor_val));  \
+    out1 = (v16i8) __msa_xori_b((v16u8) (val1), (xor_val));  \
+}
+
+#define XORI_B_3VECS_SB(val0, val1, val2,                    \
+                        out0, out1, out2,                    \
+                        xor_val)                             \
+{                                                            \
+    XORI_B_2VECS_SB(val0, val1,                              \
+                    out0, out1, xor_val);                    \
+    out2 = (v16i8) __msa_xori_b((v16u8) (val2), (xor_val));  \
+}
+
+#define XORI_B_4VECS_SB(val0, val1, val2, val3,  \
+                        out0, out1, out2, out3,  \
+                        xor_val)                 \
+{                                                \
+    XORI_B_2VECS_SB(val0, val1,                  \
+                    out0, out1, xor_val);        \
+    XORI_B_2VECS_SB(val2, val3,                  \
+                    out2, out3, xor_val);        \
+}
+
+#define XORI_B_5VECS_SB(val0, val1, val2, val3, val4,  \
+                        out0, out1, out2, out3, out4,  \
+                        xor_val)                       \
+{                                                      \
+    XORI_B_3VECS_SB(val0, val1, val2,                  \
+                    out0, out1, out2, xor_val);        \
+    XORI_B_2VECS_SB(val3, val4,                        \
+                    out3, out4, xor_val);              \
+}
+
+#define XORI_B_7VECS_SB(val0, val1, val2, val3,        \
+                        val4, val5, val6,              \
+                        out0, out1, out2, out3,        \
+                        out4, out5, out6,              \
+                        xor_val)                       \
+{                                                      \
+    XORI_B_4VECS_SB(val0, val1, val2, val3,            \
+                    out0, out1, out2, out3, xor_val);  \
+    XORI_B_3VECS_SB(val4, val5, val6,                  \
+                    out4, out5, out6, xor_val);        \
+}
+
+#define XORI_B_8VECS_SB(val0, val1, val2, val3,           \
+                        val4, val5, val6, val7,           \
+                        out0, out1, out2, out3,           \
+                        out4, out5, out6, out7, xor_val)  \
+{                                                         \
+    XORI_B_4VECS_SB(val0, val1, val2, val3,               \
+                    out0, out1, out2, out3, xor_val);     \
+    XORI_B_4VECS_SB(val4, val5, val6, val7,               \
+                    out4, out5, out6, out7, xor_val);     \
+}
+
+#endif  /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */
-- 
2.3.2


