[FFmpeg-cvslog] aarch64: NEON float to s16 audio conversion

Janne Grunau git at videolan.org
Tue Apr 22 23:46:35 CEST 2014


ffmpeg | branch: master | Janne Grunau <janne-libav at jannau.net> | Wed Apr 16 23:47:32 2014 +0200| [f4d5a2cc35fcdf06ec031fabe8b0710e995fe924] | committer: Janne Grunau

aarch64: NEON float to s16 audio conversion

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=f4d5a2cc35fcdf06ec031fabe8b0710e995fe924
---

 libavresample/aarch64/Makefile             |    4 +
 libavresample/aarch64/audio_convert_init.c |   49 ++++
 libavresample/aarch64/audio_convert_neon.S |  363 ++++++++++++++++++++++++++++
 libavresample/audio_convert.c              |    2 +
 libavresample/audio_convert.h              |    1 +
 5 files changed, 419 insertions(+)
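
The patch adds NEON implementations of the float to signed 16-bit sample
conversions used by libavresample: packed float, planar float with two
channels, and the generic planar case.  The core of each path is the same
two-step sequence: fcvtzs with a #31 shift converts a float in the nominal
[-1.0, 1.0) range to Q31 fixed point with saturation, and sqrshrn #16 (or
sri #16 in the interleaving paths) reduces that to 16 bits.  As a rough
scalar reference -- not part of the commit, and matching the NEON result
only up to rounding details -- the per-sample operation looks like this:

    #include <stdint.h>
    #include <math.h>

    /* Scalar sketch of the per-sample conversion ff_conv_flt_to_s16_neon
     * vectorizes: scale by 2^15 and saturate to the int16_t range. */
    static int16_t flt_to_s16_ref(float x)
    {
        long v = lrintf(x * (1 << 15));
        if (v < INT16_MIN) v = INT16_MIN;
        if (v > INT16_MAX) v = INT16_MAX;
        return (int16_t)v;
    }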

diff --git a/libavresample/aarch64/Makefile b/libavresample/aarch64/Makefile
index 13ba193..320ed67 100644
--- a/libavresample/aarch64/Makefile
+++ b/libavresample/aarch64/Makefile
@@ -1 +1,5 @@
+OBJS                             += aarch64/audio_convert_init.o
+
 OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
+
+NEON-OBJS                        += aarch64/audio_convert_neon.o
diff --git a/libavresample/aarch64/audio_convert_init.c b/libavresample/aarch64/audio_convert_init.c
new file mode 100644
index 0000000..020dd61
--- /dev/null
+++ b/libavresample/aarch64/audio_convert_init.c
@@ -0,0 +1,49 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavutil/samplefmt.h"
+#include "libavresample/audio_convert.h"
+
+void ff_conv_flt_to_s16_neon(int16_t *dst, const float *src, int len);
+void ff_conv_fltp_to_s16_neon(int16_t *dst, float *const *src,
+                              int len, int channels);
+void ff_conv_fltp_to_s16_2ch_neon(int16_t *dst, float *const *src,
+                                  int len, int channels);
+
+av_cold void ff_audio_convert_init_aarch64(AudioConvert *ac)
+{
+    int cpu_flags = av_get_cpu_flags();
+
+    if (have_neon(cpu_flags)) {
+        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT,
+                                  0, 16, 8, "NEON",
+                                  ff_conv_flt_to_s16_neon);
+        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP,
+                                  2, 16, 8, "NEON",
+                                  ff_conv_fltp_to_s16_2ch_neon);
+        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP,
+                                  0, 16, 8, "NEON",
+                                  ff_conv_fltp_to_s16_neon);
+    }
+}
diff --git a/libavresample/aarch64/audio_convert_neon.S b/libavresample/aarch64/audio_convert_neon.S
new file mode 100644
index 0000000..198cf86
--- /dev/null
+++ b/libavresample/aarch64/audio_convert_neon.S
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans at mansr.com>
+ * Copyright (c) 2014 Janne Grunau <janne-libav at jannau.net>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/aarch64/asm.S"
+
+function ff_conv_flt_to_s16_neon, export=1
+        subs            x2,  x2,  #8
+        ld1             {v0.4s}, [x1],  #16
+        fcvtzs          v4.4s,  v0.4s,  #31
+        ld1             {v1.4s}, [x1],  #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        b.eq            3f
+        ands            x12, x2,  #~15
+        b.eq            2f
+1:      subs            x12, x12, #16
+        sqrshrn         v4.4h,  v4.4s,  #16
+        ld1             {v2.4s}, [x1],  #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        sqrshrn2        v4.8h,  v5.4s,  #16
+        ld1             {v3.4s}, [x1],  #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        sqrshrn         v6.4h,  v6.4s,  #16
+        st1             {v4.8h}, [x0],  #16
+        sqrshrn2        v6.8h,  v7.4s,  #16
+        ld1             {v0.4s}, [x1],  #16
+        fcvtzs          v4.4s,  v0.4s,  #31
+        ld1             {v1.4s}, [x1],  #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        st1             {v6.8h}, [x0],  #16
+        b.ne            1b
+        ands            x2,  x2,  #15
+        b.eq            3f
+2:      ld1             {v2.4s}, [x1],  #16
+        sqrshrn         v4.4h,  v4.4s,  #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        ld1             {v3.4s}, [x1],  #16
+        sqrshrn2        v4.8h,  v5.4s,  #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        sqrshrn         v6.4h,  v6.4s,  #16
+        st1             {v4.8h}, [x0],  #16
+        sqrshrn2        v6.8h,  v7.4s,  #16
+        st1             {v6.8h}, [x0]
+        ret
+3:      sqrshrn         v4.4h,  v4.4s,  #16
+        sqrshrn2        v4.8h,  v5.4s,  #16
+        st1             {v4.8h}, [x0]
+        ret
+endfunc
+
+function ff_conv_fltp_to_s16_2ch_neon, export=1
+        ldp             x4,  x5,  [x1]
+        subs            x2,  x2,  #8
+        ld1             {v0.4s},  [x4], #16
+        fcvtzs          v4.4s,  v0.4s,  #31
+        ld1             {v1.4s},  [x4], #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        ld1             {v2.4s},  [x5], #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        ld1             {v3.4s},  [x5], #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        b.eq            3f
+        ands            x12, x2,  #~15
+        b.eq            2f
+1:      subs            x12, x12, #16
+        ld1             {v16.4s}, [x4], #16
+        fcvtzs          v20.4s, v16.4s, #31
+        sri             v6.4s,  v4.4s,  #16
+        ld1             {v17.4s}, [x4], #16
+        fcvtzs          v21.4s, v17.4s, #31
+        ld1             {v18.4s}, [x5], #16
+        fcvtzs          v22.4s, v18.4s, #31
+        ld1             {v19.4s}, [x5], #16
+        sri             v7.4s,  v5.4s,  #16
+        st1             {v6.4s},  [x0], #16
+        fcvtzs          v23.4s, v19.4s, #31
+        st1             {v7.4s},  [x0], #16
+        sri             v22.4s, v20.4s, #16
+        ld1             {v0.4s},  [x4], #16
+        sri             v23.4s, v21.4s, #16
+        st1             {v22.4s}, [x0], #16
+        fcvtzs          v4.4s,  v0.4s,  #31
+        ld1             {v1.4s},  [x4], #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        ld1             {v2.4s},  [x5], #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        ld1             {v3.4s},  [x5], #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        st1             {v23.4s}, [x0], #16
+        b.ne            1b
+        ands            x2,  x2,  #15
+        b.eq            3f
+2:      sri             v6.4s,  v4.4s,  #16
+        ld1             {v0.4s},  [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},  [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        ld1             {v2.4s},  [x5], #16
+        fcvtzs          v2.4s,  v2.4s,  #31
+        sri             v7.4s,  v5.4s,  #16
+        ld1             {v3.4s},  [x5], #16
+        fcvtzs          v3.4s,  v3.4s,  #31
+        sri             v2.4s,  v0.4s,  #16
+        st1             {v6.4s,v7.4s},  [x0], #32
+        sri             v3.4s,  v1.4s,  #16
+        st1             {v2.4s,v3.4s},  [x0], #32
+        ret
+3:      sri             v6.4s,  v4.4s,  #16
+        sri             v7.4s,  v5.4s,  #16
+        st1             {v6.4s,v7.4s},  [x0]
+        ret
+endfunc
+
+function ff_conv_fltp_to_s16_neon, export=1
+        cmp             w3,  #2
+        b.eq            X(ff_conv_fltp_to_s16_2ch_neon)
+        b.gt            1f
+        ldr             x1,  [x1]
+        b               X(ff_conv_flt_to_s16_neon)
+1:
+        cmp             w3,  #4
+        lsl             x12, x3,  #1
+        b.lt            4f
+
+5:      // 4 channels
+        ldp             x4, x5, [x1], #16
+        ldp             x6, x7, [x1], #16
+        mov             w9,  w2
+        mov             x8,  x0
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        ld1             {v6.4s},        [x6], #16
+        fcvtzs          v6.4s,  v6.4s,  #31
+        ld1             {v7.4s},        [x7], #16
+        fcvtzs          v7.4s,  v7.4s,  #31
+6:
+        subs            w9,  w9,  #8
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        sri             v5.4s,  v4.4s,  #16
+        ld1             {v1.4s},        [x5], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        sri             v7.4s,  v6.4s,  #16
+        ld1             {v2.4s},        [x6], #16
+        fcvtzs          v2.4s,  v2.4s,  #31
+        zip1            v16.4s, v5.4s,  v7.4s
+        ld1             {v3.4s},        [x7], #16
+        fcvtzs          v3.4s,  v3.4s,  #31
+        zip2            v17.4s, v5.4s,  v7.4s
+        st1             {v16.d}[0],     [x8], x12
+        sri             v1.4s,  v0.4s,  #16
+        st1             {v16.d}[1],     [x8], x12
+        sri             v3.4s,  v2.4s,  #16
+        st1             {v17.d}[0],     [x8], x12
+        zip1            v18.4s, v1.4s,  v3.4s
+        st1             {v17.d}[1],     [x8], x12
+        zip2            v19.4s, v1.4s,  v3.4s
+        b.eq            7f
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        st1             {v18.d}[0],     [x8], x12
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        st1             {v18.d}[1],     [x8], x12
+        ld1             {v6.4s},        [x6], #16
+        fcvtzs          v6.4s,  v6.4s,  #31
+        st1             {v19.d}[0],     [x8], x12
+        ld1             {v7.4s},        [x7], #16
+        fcvtzs          v7.4s,  v7.4s,  #31
+        st1             {v19.d}[1],     [x8], x12
+        b               6b
+7:
+        st1             {v18.d}[0],     [x8], x12
+        st1             {v18.d}[1],     [x8], x12
+        st1             {v19.d}[0],     [x8], x12
+        st1             {v19.d}[1],     [x8], x12
+        subs            w3,  w3,  #4
+        b.eq            end
+        cmp             w3,  #4
+        add             x0,  x0,  #8
+        b.ge            5b
+
+4:      // 2 channels
+        cmp             w3,  #2
+        b.lt            4f
+        ldp             x4,  x5,  [x1], #16
+        mov             w9,  w2
+        mov             x8,  x0
+        tst             w9,  #8
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        ld1             {v6.4s},        [x4], #16
+        fcvtzs          v6.4s,  v6.4s,  #31
+        ld1             {v7.4s},        [x5], #16
+        fcvtzs          v7.4s,  v7.4s,  #31
+        b.eq            6f
+        subs            w9,  w9,  #8
+        b.eq            7f
+        sri             v5.4s,  v4.4s,  #16
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        st1             {v5.s}[0],      [x8], x12
+        sri             v7.4s,  v6.4s,  #16
+        st1             {v5.s}[1],      [x8], x12
+        ld1             {v6.4s},        [x4], #16
+        fcvtzs          v6.4s,  v6.4s,  #31
+        st1             {v5.s}[2],      [x8], x12
+        st1             {v5.s}[3],      [x8], x12
+        st1             {v7.s}[0],      [x8], x12
+        st1             {v7.s}[1],      [x8], x12
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        st1             {v7.s}[2],      [x8], x12
+        st1             {v7.s}[3],      [x8], x12
+        ld1             {v7.4s},        [x5], #16
+        fcvtzs          v7.4s,  v7.4s,  #31
+6:
+        subs            w9,  w9,  #16
+        ld1             {v0.4s},        [x4], #16
+        sri             v5.4s,  v4.4s,  #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x5], #16
+        sri             v7.4s,  v6.4s,  #16
+        st1             {v5.s}[0],      [x8], x12
+        st1             {v5.s}[1],      [x8], x12
+        fcvtzs          v1.4s,  v1.4s,  #31
+        st1             {v5.s}[2],      [x8], x12
+        st1             {v5.s}[3],      [x8], x12
+        ld1             {v2.4s},        [x4], #16
+        st1             {v7.s}[0],      [x8], x12
+        fcvtzs          v2.4s,  v2.4s,  #31
+        st1             {v7.s}[1],      [x8], x12
+        ld1             {v3.4s},        [x5], #16
+        st1             {v7.s}[2],      [x8], x12
+        fcvtzs          v3.4s,  v3.4s,  #31
+        st1             {v7.s}[3],      [x8], x12
+        sri             v1.4s,  v0.4s,  #16
+        sri             v3.4s,  v2.4s,  #16
+        b.eq            6f
+        ld1             {v4.4s},        [x4], #16
+        st1             {v1.s}[0],      [x8], x12
+        fcvtzs          v4.4s,  v4.4s,  #31
+        st1             {v1.s}[1],      [x8], x12
+        ld1             {v5.4s},        [x5], #16
+        st1             {v1.s}[2],      [x8], x12
+        fcvtzs          v5.4s,  v5.4s,  #31
+        st1             {v1.s}[3],      [x8], x12
+        ld1             {v6.4s},        [x4], #16
+        st1             {v3.s}[0],      [x8], x12
+        fcvtzs          v6.4s,  v6.4s,  #31
+        st1             {v3.s}[1],      [x8], x12
+        ld1             {v7.4s},        [x5], #16
+        st1             {v3.s}[2],      [x8], x12
+        fcvtzs          v7.4s,  v7.4s,  #31
+        st1             {v3.s}[3],      [x8], x12
+        b.gt            6b
+6:
+        st1             {v1.s}[0],      [x8], x12
+        st1             {v1.s}[1],      [x8], x12
+        st1             {v1.s}[2],      [x8], x12
+        st1             {v1.s}[3],      [x8], x12
+        st1             {v3.s}[0],      [x8], x12
+        st1             {v3.s}[1],      [x8], x12
+        st1             {v3.s}[2],      [x8], x12
+        st1             {v3.s}[3],      [x8], x12
+        b               8f
+7:
+        sri             v5.4s,  v4.4s,  #16
+        sri             v7.4s,  v6.4s,  #16
+        st1             {v5.s}[0],      [x8], x12
+        st1             {v5.s}[1],      [x8], x12
+        st1             {v5.s}[2],      [x8], x12
+        st1             {v5.s}[3],      [x8], x12
+        st1             {v7.s}[0],      [x8], x12
+        st1             {v7.s}[1],      [x8], x12
+        st1             {v7.s}[2],      [x8], x12
+        st1             {v7.s}[3],      [x8], x12
+8:
+        subs            w3,  w3,  #2
+        add             x0,  x0,  #4
+        b.eq            end
+
+4:      // 1 channel
+        ldr             x4,  [x1]
+        tst             w2,  #8
+        mov             w9,  w2
+        mov             x5,  x0
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        b.ne            8f
+6:
+        subs            w9,  w9,  #16
+        ld1             {v2.4s},        [x4], #16
+        fcvtzs          v2.4s,  v2.4s,  #31
+        ld1             {v3.4s},        [x4], #16
+        fcvtzs          v3.4s,  v3.4s,  #31
+        st1             {v0.h}[1],      [x5], x12
+        st1             {v0.h}[3],      [x5], x12
+        st1             {v0.h}[5],      [x5], x12
+        st1             {v0.h}[7],      [x5], x12
+        st1             {v1.h}[1],      [x5], x12
+        st1             {v1.h}[3],      [x5], x12
+        st1             {v1.h}[5],      [x5], x12
+        st1             {v1.h}[7],      [x5], x12
+        b.eq            7f
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+7:
+        st1             {v2.h}[1],      [x5], x12
+        st1             {v2.h}[3],      [x5], x12
+        st1             {v2.h}[5],      [x5], x12
+        st1             {v2.h}[7],      [x5], x12
+        st1             {v3.h}[1],      [x5], x12
+        st1             {v3.h}[3],      [x5], x12
+        st1             {v3.h}[5],      [x5], x12
+        st1             {v3.h}[7],      [x5], x12
+        b.gt            6b
+        ret
+8:
+        subs            w9,  w9,  #8
+        st1             {v0.h}[1],      [x5], x12
+        st1             {v0.h}[3],      [x5], x12
+        st1             {v0.h}[5],      [x5], x12
+        st1             {v0.h}[7],      [x5], x12
+        st1             {v1.h}[1],      [x5], x12
+        st1             {v1.h}[3],      [x5], x12
+        st1             {v1.h}[5],      [x5], x12
+        st1             {v1.h}[7],      [x5], x12
+        b.eq            end
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        b               6b
+end:
+        ret
+endfunc
diff --git a/libavresample/audio_convert.c b/libavresample/audio_convert.c
index 371617c..27add23 100644
--- a/libavresample/audio_convert.c
+++ b/libavresample/audio_convert.c
@@ -301,6 +301,8 @@ AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr,
 
     set_generic_function(ac);
 
+    if (ARCH_AARCH64)
+        ff_audio_convert_init_aarch64(ac);
     if (ARCH_ARM)
         ff_audio_convert_init_arm(ac);
     if (ARCH_X86)
diff --git a/libavresample/audio_convert.h b/libavresample/audio_convert.h
index 6a3089d..d8e42d0 100644
--- a/libavresample/audio_convert.h
+++ b/libavresample/audio_convert.h
@@ -96,6 +96,7 @@ int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in);
 
 /* arch-specific initialization functions */
 
+void ff_audio_convert_init_aarch64(AudioConvert *ac);
 void ff_audio_convert_init_arm(AudioConvert *ac);
 void ff_audio_convert_init_x86(AudioConvert *ac);
 

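For reference, ff_conv_fltp_to_s16_neon dispatches on the channel count: for
channels == 2 it tail-calls ff_conv_fltp_to_s16_2ch_neon, for a single
channel it loads the plane pointer and tail-calls ff_conv_flt_to_s16_neon,
and otherwise it converts the planes in groups of four, two and one channel,
storing individual lanes into the interleaved output with a stride of
2 * channels bytes (x12).  A scalar sketch of the equivalent operation
(illustration only; the helper below is not part of the commit):

    #include <stdint.h>
    #include <math.h>

    /* Planar float (FLTP) to interleaved S16: src[ch] points to the plane
     * of channel ch, dst receives channel-interleaved 16-bit samples. */
    static void fltp_to_s16_ref(int16_t *dst, float *const *src,
                                int len, int channels)
    {
        for (int ch = 0; ch < channels; ch++) {
            for (int i = 0; i < len; i++) {
                long v = lrintf(src[ch][i] * (1 << 15));
                if (v < INT16_MIN) v = INT16_MIN;
                if (v > INT16_MAX) v = INT16_MAX;
                dst[i * channels + ch] = (int16_t)v;
            }
        }
    }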

