[FFmpeg-cvslog] rv40: NEON optimised rv40 qpel motion compensation

Mans Rullgard git at videolan.org
Fri Dec 9 00:18:54 CET 2011


ffmpeg | branch: master | Mans Rullgard <mans at mansr.com> | Wed Dec  7 22:28:00 2011 +0000| [392107ad079860fb41c3a9800b6d33ad4b058324] | committer: Mans Rullgard

rv40: NEON optimised rv40 qpel motion compensation

Based on patch by Janne Grunau.

Signed-off-by: Mans Rullgard <mans at mansr.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=392107ad079860fb41c3a9800b6d33ad4b058324
---

 libavcodec/arm/rv40dsp_init_neon.c |   75 +++++
 libavcodec/arm/rv40dsp_neon.S      |  639 ++++++++++++++++++++++++++++++++++++
 2 files changed, 714 insertions(+), 0 deletions(-)
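For reference, the arithmetic these NEON routines vectorise is RV40's 6-tap
lowpass filter. A scalar sketch of one filtered pixel, reconstructed from the
coefficient and shift values used in the assembly below (helper names are
illustrative, not the actual code in rv40dsp.c):

    #include <stdint.h>

    static uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* 'step' is 1 for the horizontal pass and the row pitch for the
     * vertical pass; (c1,c2,shift) is (52,20,6), (20,20,5) or (20,52,6)
     * depending on the quarter-pel position. */
    static uint8_t rv40_lowpass(const uint8_t *s, int step,
                                int c1, int c2, int shift)
    {
        int v = s[-2*step] + s[3*step]
              - 5 * (s[-step] + s[2*step])
              + c1 * s[0] + c2 * s[step];
        return clip_uint8((v + (1 << (shift - 1))) >> shift);
    }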

diff --git a/libavcodec/arm/rv40dsp_init_neon.c b/libavcodec/arm/rv40dsp_init_neon.c
index 3a863e1..36d75e6 100644
--- a/libavcodec/arm/rv40dsp_init_neon.c
+++ b/libavcodec/arm/rv40dsp_init_neon.c
@@ -23,6 +23,28 @@
 #include "libavcodec/avcodec.h"
 #include "libavcodec/rv34dsp.h"
 
+#define DECL_QPEL3(type, w, pos) \
+    void ff_##type##_rv40_qpel##w##_mc##pos##_neon(uint8_t *dst, uint8_t *src,\
+                                                   int stride)
+#define DECL_QPEL2(w, pos)                      \
+    DECL_QPEL3(put, w, pos);                    \
+    DECL_QPEL3(avg, w, pos)
+
+#define DECL_QPEL_XY(x, y)                      \
+    DECL_QPEL2(16, x ## y);                     \
+    DECL_QPEL2(8,  x ## y)
+
+#define DECL_QPEL_Y(y)                          \
+    DECL_QPEL_XY(0, y);                         \
+    DECL_QPEL_XY(1, y);                         \
+    DECL_QPEL_XY(2, y);                         \
+    DECL_QPEL_XY(3, y)
+
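+/* Expand prototypes for every quarter-pel position; e.g. DECL_QPEL_XY(1, 0)
+ * declares ff_{put,avg}_rv40_qpel{16,8}_mc10_neon(dst, src, stride). */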
+DECL_QPEL_Y(0);
+DECL_QPEL_Y(1);
+DECL_QPEL_Y(2);
+DECL_QPEL_Y(3);
+
 void ff_put_rv40_chroma_mc8_neon(uint8_t *, uint8_t *, int, int, int, int);
 void ff_put_rv40_chroma_mc4_neon(uint8_t *, uint8_t *, int, int, int, int);
 
@@ -34,6 +56,59 @@ void ff_rv40_weight_func_8_neon(uint8_t *, uint8_t *, uint8_t *, int, int, int);
 
 void ff_rv40dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
 {
+    c->put_pixels_tab[0][ 1] = ff_put_rv40_qpel16_mc10_neon;
+    c->put_pixels_tab[0][ 3] = ff_put_rv40_qpel16_mc30_neon;
+    c->put_pixels_tab[0][ 4] = ff_put_rv40_qpel16_mc01_neon;
+    c->put_pixels_tab[0][ 5] = ff_put_rv40_qpel16_mc11_neon;
+    c->put_pixels_tab[0][ 6] = ff_put_rv40_qpel16_mc21_neon;
+    c->put_pixels_tab[0][ 7] = ff_put_rv40_qpel16_mc31_neon;
+    c->put_pixels_tab[0][ 9] = ff_put_rv40_qpel16_mc12_neon;
+    c->put_pixels_tab[0][10] = ff_put_rv40_qpel16_mc22_neon;
+    c->put_pixels_tab[0][11] = ff_put_rv40_qpel16_mc32_neon;
+    c->put_pixels_tab[0][12] = ff_put_rv40_qpel16_mc03_neon;
+    c->put_pixels_tab[0][13] = ff_put_rv40_qpel16_mc13_neon;
+    c->put_pixels_tab[0][14] = ff_put_rv40_qpel16_mc23_neon;
+    c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_neon;
+    c->avg_pixels_tab[0][ 1] = ff_avg_rv40_qpel16_mc10_neon;
+    c->avg_pixels_tab[0][ 3] = ff_avg_rv40_qpel16_mc30_neon;
+    c->avg_pixels_tab[0][ 4] = ff_avg_rv40_qpel16_mc01_neon;
+    c->avg_pixels_tab[0][ 5] = ff_avg_rv40_qpel16_mc11_neon;
+    c->avg_pixels_tab[0][ 6] = ff_avg_rv40_qpel16_mc21_neon;
+    c->avg_pixels_tab[0][ 7] = ff_avg_rv40_qpel16_mc31_neon;
+    c->avg_pixels_tab[0][ 9] = ff_avg_rv40_qpel16_mc12_neon;
+    c->avg_pixels_tab[0][10] = ff_avg_rv40_qpel16_mc22_neon;
+    c->avg_pixels_tab[0][11] = ff_avg_rv40_qpel16_mc32_neon;
+    c->avg_pixels_tab[0][12] = ff_avg_rv40_qpel16_mc03_neon;
+    c->avg_pixels_tab[0][13] = ff_avg_rv40_qpel16_mc13_neon;
+    c->avg_pixels_tab[0][14] = ff_avg_rv40_qpel16_mc23_neon;
+    c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_neon;
+    c->put_pixels_tab[1][ 1] = ff_put_rv40_qpel8_mc10_neon;
+    c->put_pixels_tab[1][ 3] = ff_put_rv40_qpel8_mc30_neon;
+    c->put_pixels_tab[1][ 4] = ff_put_rv40_qpel8_mc01_neon;
+    c->put_pixels_tab[1][ 5] = ff_put_rv40_qpel8_mc11_neon;
+    c->put_pixels_tab[1][ 6] = ff_put_rv40_qpel8_mc21_neon;
+    c->put_pixels_tab[1][ 7] = ff_put_rv40_qpel8_mc31_neon;
+    c->put_pixels_tab[1][ 9] = ff_put_rv40_qpel8_mc12_neon;
+    c->put_pixels_tab[1][10] = ff_put_rv40_qpel8_mc22_neon;
+    c->put_pixels_tab[1][11] = ff_put_rv40_qpel8_mc32_neon;
+    c->put_pixels_tab[1][12] = ff_put_rv40_qpel8_mc03_neon;
+    c->put_pixels_tab[1][13] = ff_put_rv40_qpel8_mc13_neon;
+    c->put_pixels_tab[1][14] = ff_put_rv40_qpel8_mc23_neon;
+    c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_neon;
+    c->avg_pixels_tab[1][ 1] = ff_avg_rv40_qpel8_mc10_neon;
+    c->avg_pixels_tab[1][ 3] = ff_avg_rv40_qpel8_mc30_neon;
+    c->avg_pixels_tab[1][ 4] = ff_avg_rv40_qpel8_mc01_neon;
+    c->avg_pixels_tab[1][ 5] = ff_avg_rv40_qpel8_mc11_neon;
+    c->avg_pixels_tab[1][ 6] = ff_avg_rv40_qpel8_mc21_neon;
+    c->avg_pixels_tab[1][ 7] = ff_avg_rv40_qpel8_mc31_neon;
+    c->avg_pixels_tab[1][ 9] = ff_avg_rv40_qpel8_mc12_neon;
+    c->avg_pixels_tab[1][10] = ff_avg_rv40_qpel8_mc22_neon;
+    c->avg_pixels_tab[1][11] = ff_avg_rv40_qpel8_mc32_neon;
+    c->avg_pixels_tab[1][12] = ff_avg_rv40_qpel8_mc03_neon;
+    c->avg_pixels_tab[1][13] = ff_avg_rv40_qpel8_mc13_neon;
+    c->avg_pixels_tab[1][14] = ff_avg_rv40_qpel8_mc23_neon;
+    c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_neon;
+
     c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_neon;
     c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_neon;
     c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_neon;
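The slots follow the dxy = 4*y + x layout the RV34/RV40 decoder uses to pick
a motion-compensation function; slot 0 (the full-pel copy) and the
one-dimensional half-pel slots 2 and 8 keep whatever implementations were
installed before this runs. A hedged sketch of a call site (illustrative,
not the actual rv34.c code):

    /* 'mx'/'my' are quarter-pel offsets in 0..3; table row 0 is for
     * 16x16 partitions, row 1 for 8x8. */
    static void mc_part(RV34DSPContext *c, uint8_t *dst, uint8_t *src,
                        int stride, int mx, int my, int is_16x16)
    {
        c->put_pixels_tab[!is_16x16][4 * my + mx](dst, src, stride);
    }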
diff --git a/libavcodec/arm/rv40dsp_neon.S b/libavcodec/arm/rv40dsp_neon.S
index cafd98a..07ba842 100644
--- a/libavcodec/arm/rv40dsp_neon.S
+++ b/libavcodec/arm/rv40dsp_neon.S
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2011 Janne Grunau <janne-libav at jannau.net>
+ * Copyright (c) 2011 Mans Rullgard <mans at mansr.com>
  *
  * This file is part of Libav.
  *
@@ -19,6 +20,644 @@
  */
 
 #include "asm.S"
+#include "neon.S"
+
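+@ 6-tap RV40 lowpass over 8 pixels.  \r0:\r1 hold 16 consecutive source
+@ bytes with the first tap (src[-2]) at byte 0; \rc1/\rc2 are the two
+@ centre coefficients and the rounded, clipped result replaces \r0.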
+.macro  qpel_lowpass    r0,  r1,  rc1, rc2, shift
+        vext.8          d25, \r0, \r1, #1       @ src[-1]
+        vext.8          d26, \r0, \r1, #4       @ src[ 2]
+        vext.8          d24, \r0, \r1, #5       @ src[ 3]
+        vaddl.u8        q9,  d25, d26
+        vaddl.u8        q8,  \r0, d24
+        vext.8          d27, \r0, \r1, #2       @ src[ 0]
+        vshl.s16        q12, q9,  #2
+        vsub.s16        q8,  q8,  q9
+        vext.8          d28, \r0, \r1, #3       @ src[ 1]
+        vsub.s16        q8,  q8,  q12
+        vmlal.u8        q8,  d27, \rc1
+        vmlal.u8        q8,  d28, \rc2
+        vqrshrun.s16    \r0, q8,  #\shift
+.endm
+
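+@ As qpel_lowpass, but filters two rows (\r0:\r1 and \r2:\r3) with the
+@ arithmetic interleaved to hide instruction latency; the results land
+@ in \r0 and \r2.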
+.macro  qpel_lowpass_x2 r0,  r1,  r2,  r3,  rc1, rc2, shift
+        vext.8          d25, \r0, \r1, #1       @ src[-1]
+        vext.8          d26, \r0, \r1, #4       @ src[ 2]
+        vext.8          d24, \r0, \r1, #5       @ src[ 3]
+        vaddl.u8        q9,  d25, d26
+        vaddl.u8        q8,  \r0, d24
+        vext.8          d29, \r0, \r1, #2       @ src[ 0]
+        vext.8          d28, \r0, \r1, #3       @ src[ 1]
+        vshl.s16        q10, q9,  #2
+        vext.8          \r1, \r2, \r3, #1       @ src[-1]
+        vsub.s16        q8,  q8,  q9
+        vext.8          d22, \r2, \r3, #4       @ src[ 2]
+        vext.8          \r0, \r2, \r3, #5       @ src[ 3]
+        vaddl.u8        q13, \r1, d22
+        vaddl.u8        q12, \r2, \r0
+        vsub.s16        q8,  q8,  q10
+        vshl.s16        q9,  q13, #2
+        vsub.s16        q12, q12, q13
+        vmlal.u8        q8,  d29, \rc1
+        vmlal.u8        q8,  d28, \rc2
+        vsub.s16        q12, q12, q9
+        vext.8          d26, \r2, \r3, #2       @ src[ 0]
+        vext.8          d27, \r2, \r3, #3       @ src[ 1]
+        vmlal.u8        q12, d26, \rc1
+        vmlal.u8        q12, d27, \rc2
+        vqrshrun.s16    \r0, q8,  #\shift
+        vqrshrun.s16    \r2, q12, #\shift
+.endm
+
+.macro  rv40_qpel8_h    shift
+function put_rv40_qpel8_h_lp_packed_s\shift\()_neon
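+        @ horizontal pass: r1 points at the first filter tap, r2 = source
+        @ stride, r3 = row count; writes r3+1 rows of 8 filtered bytes,
+        @ packed back to back, into the aligned scratch buffer at r12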
+1:
+        vld1.8          {q2},     [r1], r2
+        vld1.8          {q3},     [r1], r2
+        qpel_lowpass_x2 d4,  d5,  d6,  d7,  d0,  d1,  \shift
+        vst1.8          {d4},     [r12,:64]!
+        vst1.8          {d6},     [r12,:64]!
+        subs            r3,  r3,  #2
+        bgt             1b
+        vld1.8          {q2},     [r1]
+        qpel_lowpass    d4,  d5,  d0,  d1,  \shift
+        vst1.8          {d4},     [r12,:64]!
+        bx              lr
+endfunc
+.endm
+
+.macro  rv40_qpel8_v    shift, type
+function \type\()_rv40_qpel8_v_lp_packed_s\shift\()_neon
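+        @ vertical pass over the packed scratch buffer at r1: load 13 rows,
+        @ transpose so columns become rows, filter, transpose back, then
+        @ put or average 8 rows to r0 with stride r2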
+        vld1.64         {d2},     [r1,:64]!
+        vld1.64         {d3},     [r1,:64]!
+        vld1.64         {d4},     [r1,:64]!
+        vld1.64         {d5},     [r1,:64]!
+        vld1.64         {d6},     [r1,:64]!
+        vld1.64         {d7},     [r1,:64]!
+        vld1.64         {d8},     [r1,:64]!
+        vld1.64         {d9},     [r1,:64]!
+        vld1.64         {d10},    [r1,:64]!
+        vld1.64         {d11},    [r1,:64]!
+        vld1.64         {d12},    [r1,:64]!
+        vld1.64         {d13},    [r1,:64]!
+        vld1.64         {d14},    [r1,:64]!
+        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
+        transpose_8x8   d10, d11, d12, d13, d14, d15, d30, d31
+        qpel_lowpass_x2 d2,  d10, d3,  d11, d0,  d1,  \shift
+        qpel_lowpass_x2 d4,  d12, d5,  d13, d0,  d1,  \shift
+        qpel_lowpass_x2 d6,  d14, d7,  d15, d0,  d1,  \shift
+        qpel_lowpass_x2 d8,  d30, d9,  d31, d0,  d1,  \shift
+        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
+  .ifc \type,avg
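+        @ avg: combine with the destination using vrhadd, the per-byte
+        @ rounding average (a + b + 1) >> 1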
+        vld1.64         d12,      [r0,:64], r2
+        vld1.64         d13,      [r0,:64], r2
+        vld1.64         d14,      [r0,:64], r2
+        vld1.64         d15,      [r0,:64], r2
+        vld1.64         d16,      [r0,:64], r2
+        vld1.64         d17,      [r0,:64], r2
+        vld1.64         d18,      [r0,:64], r2
+        vld1.64         d19,      [r0,:64], r2
+        sub             r0,  r0,  r2,  lsl #3
+        vrhadd.u8       q1,  q1,  q6
+        vrhadd.u8       q2,  q2,  q7
+        vrhadd.u8       q3,  q3,  q8
+        vrhadd.u8       q4,  q4,  q9
+  .endif
+        vst1.64         d2,       [r0,:64], r2
+        vst1.64         d3,       [r0,:64], r2
+        vst1.64         d4,       [r0,:64], r2
+        vst1.64         d5,       [r0,:64], r2
+        vst1.64         d6,       [r0,:64], r2
+        vst1.64         d7,       [r0,:64], r2
+        vst1.64         d8,       [r0,:64], r2
+        vst1.64         d9,       [r0,:64], r2
+        bx              lr
+endfunc
+.endm
+
+        rv40_qpel8_h    5
+        rv40_qpel8_h    6
+
+.macro  rv40_qpel       type
+function \type\()_rv40_qpel8_h_lowpass_neon
+  .ifc \type,avg
+        mov             r12, r0
+  .endif
+1:
+        vld1.8          {q2},     [r1], r2
+        vld1.8          {q3},     [r1], r2
+        qpel_lowpass_x2 d4,  d5,  d6,  d7,  d0,  d1,  6
+  .ifc \type,avg
+        vld1.8          {d3},     [r12,:64], r2
+        vld1.8          {d16},    [r12,:64], r2
+        vrhadd.u8       d4,  d4,  d3
+        vrhadd.u8       d6,  d6,  d16
+  .endif
+        vst1.8          {d4},     [r0,:64], r2
+        vst1.8          {d6},     [r0,:64], r2
+        subs            r3,  r3,  #2
+        bgt             1b
+        bx              lr
+endfunc
+
+function \type\()_rv40_qpel8_v_lowpass_neon
+        vld1.64         {d2},     [r1], r2
+        vld1.64         {d3},     [r1], r2
+        vld1.64         {d4},     [r1], r2
+        vld1.64         {d5},     [r1], r2
+        vld1.64         {d6},     [r1], r2
+        vld1.64         {d7},     [r1], r2
+        vld1.64         {d8},     [r1], r2
+        vld1.64         {d9},     [r1], r2
+        vld1.64         {d10},    [r1], r2
+        vld1.64         {d11},    [r1], r2
+        vld1.64         {d12},    [r1], r2
+        vld1.64         {d13},    [r1], r2
+        vld1.64         {d14},    [r1]
+        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
+        transpose_8x8   d10, d11, d12, d13, d14, d15, d30, d31
+        qpel_lowpass_x2 d2,  d10, d3,  d11, d0,  d1,  6
+        qpel_lowpass_x2 d4,  d12, d5,  d13, d0,  d1,  6
+        qpel_lowpass_x2 d6,  d14, d7,  d15, d0,  d1,  6
+        qpel_lowpass_x2 d8,  d30, d9,  d31, d0,  d1,  6
+        transpose_8x8   d2,  d3,  d4,  d5,  d6,  d7,  d8,  d9
+  .ifc \type,avg
+        vld1.64         d12,      [r0,:64], r2
+        vld1.64         d13,      [r0,:64], r2
+        vld1.64         d14,      [r0,:64], r2
+        vld1.64         d15,      [r0,:64], r2
+        vld1.64         d16,      [r0,:64], r2
+        vld1.64         d17,      [r0,:64], r2
+        vld1.64         d18,      [r0,:64], r2
+        vld1.64         d19,      [r0,:64], r2
+        sub             r0,  r0,  r2,  lsl #3
+        vrhadd.u8       q1,  q1,  q6
+        vrhadd.u8       q2,  q2,  q7
+        vrhadd.u8       q3,  q3,  q8
+        vrhadd.u8       q4,  q4,  q9
+  .endif
+        vst1.64         d2,       [r0,:64], r2
+        vst1.64         d3,       [r0,:64], r2
+        vst1.64         d4,       [r0,:64], r2
+        vst1.64         d5,       [r0,:64], r2
+        vst1.64         d6,       [r0,:64], r2
+        vst1.64         d7,       [r0,:64], r2
+        vst1.64         d8,       [r0,:64], r2
+        vst1.64         d9,       [r0,:64], r2
+        bx              lr
+endfunc
+
+        rv40_qpel8_v    5, \type
+        rv40_qpel8_v    6, \type
+
+function ff_\type\()_rv40_qpel8_mc10_neon, export=1
+        sub             r1,  r1,  #2
+        mov             r3,  #8
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        b               \type\()_rv40_qpel8_h_lowpass_neon
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc30_neon, export=1
+        sub             r1,  r1,  #2
+        mov             r3,  #8
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        b               \type\()_rv40_qpel8_h_lowpass_neon
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc01_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             r1,  r1,  r2,  lsl #1
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              \type\()_rv40_qpel8_v_lowpass_neon
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc11_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
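+        @ reserve a 14-row packed scratch buffer on the stack and round
+        @ r12 up to an 8-byte boundary for the aligned stores and loads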
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc21_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        vmov.i8         d0,  #52
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc31_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        vswp            d0,  d1
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc12_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        vmov.i8         d0,  #20
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc22_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc32_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        vmov.i8         d1,  #20
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc03_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             r1,  r1,  r2,  lsl #1
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        bl              \type\()_rv40_qpel8_v_lowpass_neon
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc33_neon, export=1
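+        @ the (3,3) position is the rounded average of the four surrounding
+        @ full-pel pixels, so tail-call the shared pixels8_xy2 routine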
+        mov             r3,  #8
+        b               ff_\type\()_pixels8_xy2_neon
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc13_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        vswp            d0,  d1
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel8_mc23_neon, export=1
+        push            {r4, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #14*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        mov             r3,  #12
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        vmov.i8         d1,  #52
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        add             sp,  sp,  #14*8
+        vpop            {d8-d15}
+        pop             {r4, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc10_neon, export=1
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+.L\type\()_rv40_qpel16_h:
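+        @ 16-wide blocks reuse the 8-wide horizontal filter: left half over
+        @ all 16 rows, then dst/src stepped back and moved 8 pixels right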
+        push            {r1, lr}
+        sub             r1,  r1,  #2
+        mov             r3,  #16
+        bl              \type\()_rv40_qpel8_h_lowpass_neon
+        pop             {r1, lr}
+        sub             r0,  r0,  r2,  lsl #4
+        add             r0,  r0,  #8
+        add             r1,  r1,  #6
+        mov             r3,  #16
+        b               \type\()_rv40_qpel8_h_lowpass_neon
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc30_neon, export=1
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        b               .L\type\()_rv40_qpel16_h
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc01_neon, export=1
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+.L\type\()_rv40_qpel16_v:
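+        @ filter as four 8x8 quadrants: top and bottom of the left half
+        @ (rewinding src 4 rows, as the lowpass advanced it 12), then the
+        @ same for the right half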
+        sub             r1,  r1,  r2,  lsl #1
+        push            {r1, lr}
+        vpush           {d8-d15}
+        bl              \type\()_rv40_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r2,  lsl #2
+        bl              \type\()_rv40_qpel8_v_lowpass_neon
+        ldr             r1,  [sp, #64]
+        sub             r0,  r0,  r2,  lsl #4
+        add             r0,  r0,  #8
+        add             r1,  r1,  #8
+        bl              \type\()_rv40_qpel8_v_lowpass_neon
+        sub             r1,  r1,  r2,  lsl #2
+        bl              \type\()_rv40_qpel8_v_lowpass_neon
+        vpop            {d8-d15}
+        pop             {r1, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc11_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
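+        @ reload the saved src pointer: it lives above the 44*8-byte
+        @ scratch area plus the 64 bytes pushed by vpush {d8-d15} (sp+416)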
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+.L\type\()_rv40_qpel16_v_s6:
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        sub             r1,  r1,  #40
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        sub             r0,  r0,  r2,  lsl #4
+        add             r0,  r0,  #8
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        sub             r1,  r1,  #40
+        bl              \type\()_rv40_qpel8_v_lp_packed_s6_neon
+        add             sp,  sp,  #44*8
+        vpop            {d8-d15}
+        pop             {r1, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc21_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        vmov.i8         d0,  #52
+        b               .L\type\()_rv40_qpel16_v_s6
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc31_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        vswp            d0,  d1
+        b               .L\type\()_rv40_qpel16_v_s6
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc12_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        vmov.i8         d0,  #20
+.L\type\()_rv40_qpel16_v_s5:
+        add             r1,  sp,  #7
+        bic             r1,  r1,  #7
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        sub             r1,  r1,  #40
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        sub             r0,  r0,  r2,  lsl #4
+        add             r0,  r0,  #8
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        sub             r1,  r1,  #40
+        bl              \type\()_rv40_qpel8_v_lp_packed_s5_neon
+        add             sp,  sp,  #44*8
+        vpop            {d8-d15}
+        pop             {r1, pc}
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc22_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        b               .L\type\()_rv40_qpel16_v_s5
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc32_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        vmov.i8         d1,  #20
+        b               .L\type\()_rv40_qpel16_v_s5
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc03_neon, export=1
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #52
+        b               .L\type\()_rv40_qpel16_v
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc13_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #52
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s6_neon
+        vswp            d0,  d1
+        b               .L\type\()_rv40_qpel16_v_s6
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc23_neon, export=1
+        sub             r1,  r1,  r2,  lsl #1
+        sub             r1,  r1,  #2
+        push            {r1, lr}
+        vpush           {d8-d15}
+        sub             sp,  sp,  #44*8
+        add             r12, sp,  #7
+        bic             r12, r12, #7
+        mov             r3,  #20
+        vmov.i8         d0,  #20
+        vmov.i8         d1,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        ldr             r1,  [sp, #416]
+        add             r1,  r1,  #8
+        mov             r3,  #20
+        bl              put_rv40_qpel8_h_lp_packed_s5_neon
+        vmov.i8         d1,  #52
+        b               .L\type\()_rv40_qpel16_v_s6
+endfunc
+
+function ff_\type\()_rv40_qpel16_mc33_neon, export=1
+        mov             r3,  #16
+        b               ff_\type\()_pixels16_xy2_neon
+endfunc
+.endm
+
+        rv40_qpel       put
+        rv40_qpel       avg
 
 .macro  rv40_weight
         vmovl.u8        q8,  d2

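For the mixed (x,y) positions the assembly above runs the horizontal pass
into a packed, 8-byte-wide scratch buffer on the stack, then the vertical
pass over that buffer. In C terms the composition is roughly the following
sketch, reusing the rv40_lowpass() helper sketched near the top of this
message (buffer geometry taken from the assembly; the function name is
illustrative):

    static void put_rv40_qpel8_hv(uint8_t *dst, const uint8_t *src,
                                  int stride,
                                  int cx1, int cx2, int sx,  /* horizontal */
                                  int cy1, int cy2, int sy)  /* vertical */
    {
        uint8_t tmp[13 * 8];   /* 8 output rows + 5 extra for the 6 taps;
                                  the asm rounds this up to 14*8 */

        for (int y = 0; y < 13; y++)           /* source rows -2..10 */
            for (int x = 0; x < 8; x++)
                tmp[8 * y + x] =
                    rv40_lowpass(src + (y - 2) * stride + x, 1,
                                 cx1, cx2, sx);

        for (int y = 0; y < 8; y++)
            for (int x = 0; x < 8; x++)
                dst[y * stride + x] =
                    rv40_lowpass(&tmp[8 * (y + 2) + x], 8, cy1, cy2, sy);
    }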

