[FFmpeg-cvslog] ppc: Fix the FFT bug in little-endian environments on POWER7 and later

Rong Yan git at videolan.org
Thu Jun 19 01:14:11 CEST 2014


ffmpeg | branch: master | Rong Yan <rongyan236 at gmail.com> | Wed Jun 18 04:51:35 2014 -0400| [ab12373956e92b865bced9b05b12971f062cfd3e] | committer: Michael Niedermayer

ppc: Fix the FFT bug in little-endian environments on POWER7 and later

Signed-off-by: Michael Niedermayer <michaelni at gmx.at>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=ab12373956e92b865bced9b05b12971f062cfd3e
---

 configure                    |   15 +-
 libavcodec/ppc/Makefile      |    1 +
 libavcodec/ppc/fft_altivec.c |   12 +
 libavcodec/ppc/fft_vsx.c     |  227 ++++++++++++
 libavcodec/ppc/fft_vsx.h     |  830 ++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 1084 insertions(+), 1 deletion(-)

diff --git a/configure b/configure
index d1eb8da..75740f9 100755
--- a/configure
+++ b/configure
@@ -1548,6 +1548,7 @@ ARCH_EXT_LIST_PPC="
     dcbzl
     ldbrx
     ppc4xx
+    vsx
 "
 
 ARCH_EXT_LIST_X86="
@@ -1933,6 +1934,7 @@ mipsdspr2_deps="mips"
 
 altivec_deps="ppc"
 ppc4xx_deps="ppc"
+vsx_deps="ppc"
 
 cpunop_deps="i686"
 x86_64_select="i686"
@@ -3691,6 +3693,7 @@ elif enabled mips; then
 elif enabled ppc; then
 
     disable ldbrx
+    disable vsx
 
     case $(tolower $cpu) in
         601|ppc601|powerpc601)
@@ -3718,7 +3721,7 @@ elif enabled ppc; then
         g5|970|ppc970|powerpc970)
             cpuflags="-mcpu=970"
         ;;
-        power[3-7]*)
+        power[3-8]*)
             cpuflags="-mcpu=$cpu"
         ;;
         cell)
@@ -4286,6 +4289,12 @@ unsigned int endian = 'B' << 24 | 'I' << 16 | 'G' << 8 | 'E';
 EOF
 od -t x1 $TMPO | grep -q '42 *49 *47 *45' && enable bigendian
 
+if  [ "$cpu" = "power7" ] || [ "$cpu" = "power8" ] ;then
+    if ! enabled bigendian ;then
+        enable vsx
+    fi
+fi
+
 
 if enabled asm; then
     enabled     arm         && nogas=die
@@ -4415,6 +4424,9 @@ EOF
         enabled altivec || warn "Altivec disabled, possibly missing --cpu flag"
     fi
 
+    if enabled vsx; then
+        check_cflags -mvsx
+    fi
 elif enabled x86; then
 
     check_builtin rdtsc    intrin.h   "__rdtsc()"
@@ -5256,6 +5268,7 @@ fi
 if enabled ppc; then
     echo "AltiVec enabled           ${altivec-no}"
     echo "PPC 4xx optimizations     ${ppc4xx-no}"
+    echo "PPC VSX optimizations     ${vsx-no}"
     echo "dcbzl available           ${dcbzl-no}"
 fi
 echo "debug symbols             ${debug-no}"
diff --git a/libavcodec/ppc/Makefile b/libavcodec/ppc/Makefile
index b04aa0e..bec7845 100644
--- a/libavcodec/ppc/Makefile
+++ b/libavcodec/ppc/Makefile
@@ -26,4 +26,5 @@ ALTIVEC-OBJS-$(CONFIG_DSPUTIL)         += ppc/dsputil_altivec.o         \
                                           ppc/int_altivec.o             \
 
 FFT-OBJS-$(HAVE_GNU_AS)                += ppc/fft_altivec_s.o
+FFT-OBJS-$(HAVE_VSX)                   += ppc/fft_vsx.o
 ALTIVEC-OBJS-$(CONFIG_FFT)             += $(FFT-OBJS-yes)
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 8d7fd46..675fa33 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -36,8 +36,12 @@
  * It also assumes all FFTComplex are 8 bytes-aligned pairs of floats.
  */
 
+#if HAVE_VSX
+#include "fft_vsx.h"
+#else
 void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_interleave_altivec(FFTContext *s, FFTComplex *z);
+#endif
 
 #if HAVE_GNU_AS && HAVE_ALTIVEC
 static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
@@ -94,7 +98,11 @@ static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample
         k--;
     } while(k >= 0);
 
+#if HAVE_VSX
+    ff_fft_calc_vsx(s, (FFTComplex*)output);
+#else
     ff_fft_calc_altivec(s, (FFTComplex*)output);
+#endif
 
     /* post rotation + reordering */
     j = -n32;
@@ -147,7 +155,11 @@ av_cold void ff_fft_init_ppc(FFTContext *s)
     if (!PPC_ALTIVEC(av_get_cpu_flags()))
         return;
 
+#if HAVE_VSX
+    s->fft_calc = ff_fft_calc_interleave_vsx;
+#else
     s->fft_calc   = ff_fft_calc_interleave_altivec;
+#endif
     if (s->mdct_bits >= 5) {
         s->imdct_calc = imdct_calc_altivec;
         s->imdct_half = imdct_half_altivec;
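
(Aside, not part of the patch.) With the hunks above, ff_fft_init_ppc() installs ff_fft_calc_interleave_vsx as s->fft_calc whenever HAVE_VSX is set, and imdct_half_altivec() calls ff_fft_calc_vsx() instead of the AltiVec assembly, so callers that go through the FFTContext function pointers need no changes. A hedged usage sketch (run_fft512 and the 512-point size are illustrative; the entry points are the ones declared in libavcodec/fft.h):

    #include "libavcodec/fft.h"

    /* In-place 2^9 = 512-point forward FFT; on POWER7/POWER8 little-endian
     * builds the fft_calc pointer set up by ff_fft_init_ppc() now lands in
     * ff_fft_calc_interleave_vsx(). */
    static int run_fft512(FFTComplex *buf /* 512 elements */)
    {
        FFTContext ctx;

        if (ff_fft_init(&ctx, 9, 0) < 0)
            return -1;
        ctx.fft_permute(&ctx, buf);  /* reorder input as fft_calc expects */
        ctx.fft_calc(&ctx, buf);     /* dispatches to the VSX/AltiVec path */
        ff_fft_end(&ctx);
        return 0;
    }
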
diff --git a/libavcodec/ppc/fft_vsx.c b/libavcodec/ppc/fft_vsx.c
new file mode 100644
index 0000000..e92975f
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.c
@@ -0,0 +1,227 @@
+/*
+ * FFT  transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+#include "fft_vsx.h"
+
+#if HAVE_VSX
+
+static void fft32_vsx_interleave(FFTComplex *z)
+{
+    fft16_vsx_interleave(z);
+    fft8_vsx_interleave(z+16);
+    fft8_vsx_interleave(z+24);
+    pass_vsx_interleave(z,ff_cos_32,4);
+}
+
+static void fft64_vsx_interleave(FFTComplex *z)
+{
+    fft32_vsx_interleave(z);
+    fft16_vsx_interleave(z+32);
+    fft16_vsx_interleave(z+48);
+    pass_vsx_interleave(z,ff_cos_64, 8);
+}
+static void fft128_vsx_interleave(FFTComplex *z)
+{
+    fft64_vsx_interleave(z);
+    fft32_vsx_interleave(z+64);
+    fft32_vsx_interleave(z+96);
+    pass_vsx_interleave(z,ff_cos_128,16);
+}
+static void fft256_vsx_interleave(FFTComplex *z)
+{
+    fft128_vsx_interleave(z);
+    fft64_vsx_interleave(z+128);
+    fft64_vsx_interleave(z+192);
+    pass_vsx_interleave(z,ff_cos_256,32);
+}
+static void fft512_vsx_interleave(FFTComplex *z)
+{
+    fft256_vsx_interleave(z);
+    fft128_vsx_interleave(z+256);
+    fft128_vsx_interleave(z+384);
+    pass_vsx_interleave(z,ff_cos_512,64);
+}
+static void fft1024_vsx_interleave(FFTComplex *z)
+{
+    fft512_vsx_interleave(z);
+    fft256_vsx_interleave(z+512);
+    fft256_vsx_interleave(z+768);
+    pass_vsx_interleave(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx_interleave(FFTComplex *z)
+{
+    fft1024_vsx_interleave(z);
+    fft512_vsx_interleave(z+1024);
+    fft512_vsx_interleave(z+1536);
+    pass_vsx_interleave(z,ff_cos_2048,256);
+}
+static void fft4096_vsx_interleave(FFTComplex *z)
+{
+    fft2048_vsx_interleave(z);
+    fft1024_vsx_interleave(z+2048);
+    fft1024_vsx_interleave(z+3072);
+    pass_vsx_interleave(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx_interleave(FFTComplex *z)
+{
+    fft4096_vsx_interleave(z);
+    fft2048_vsx_interleave(z+4096);
+    fft2048_vsx_interleave(z+6144);
+    pass_vsx_interleave(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx_interleave(FFTComplex *z)
+{
+    fft8192_vsx_interleave(z);
+    fft4096_vsx_interleave(z+8192);
+    fft4096_vsx_interleave(z+12288);
+    pass_vsx_interleave(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx_interleave(FFTComplex *z)
+{
+    fft16384_vsx_interleave(z);
+    fft8192_vsx_interleave(z+16384);
+    fft8192_vsx_interleave(z+24576);
+    pass_vsx_interleave(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx_interleave(FFTComplex *z)
+{
+    fft32768_vsx_interleave(z);
+    fft16384_vsx_interleave(z+32768);
+    fft16384_vsx_interleave(z+49152);
+    pass_vsx_interleave(z,ff_cos_65536,8192);
+}
+
+static void fft32_vsx(FFTComplex *z)
+{
+    fft16_vsx(z);
+    fft8_vsx(z+16);
+    fft8_vsx(z+24);
+    pass_vsx(z,ff_cos_32,4);
+}
+
+static void fft64_vsx(FFTComplex *z)
+{
+    fft32_vsx(z);
+    fft16_vsx(z+32);
+    fft16_vsx(z+48);
+    pass_vsx(z,ff_cos_64, 8);
+}
+static void fft128_vsx(FFTComplex *z)
+{
+    fft64_vsx(z);
+    fft32_vsx(z+64);
+    fft32_vsx(z+96);
+    pass_vsx(z,ff_cos_128,16);
+}
+static void fft256_vsx(FFTComplex *z)
+{
+    fft128_vsx(z);
+    fft64_vsx(z+128);
+    fft64_vsx(z+192);
+    pass_vsx(z,ff_cos_256,32);
+}
+static void fft512_vsx(FFTComplex *z)
+{
+    fft256_vsx(z);
+    fft128_vsx(z+256);
+    fft128_vsx(z+384);
+    pass_vsx(z,ff_cos_512,64);
+}
+static void fft1024_vsx(FFTComplex *z)
+{
+    fft512_vsx(z);
+    fft256_vsx(z+512);
+    fft256_vsx(z+768);
+    pass_vsx(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx(FFTComplex *z)
+{
+    fft1024_vsx(z);
+    fft512_vsx(z+1024);
+    fft512_vsx(z+1536);
+    pass_vsx(z,ff_cos_2048,256);
+}
+static void fft4096_vsx(FFTComplex *z)
+{
+    fft2048_vsx(z);
+    fft1024_vsx(z+2048);
+    fft1024_vsx(z+3072);
+    pass_vsx(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx(FFTComplex *z)
+{
+    fft4096_vsx(z);
+    fft2048_vsx(z+4096);
+    fft2048_vsx(z+6144);
+    pass_vsx(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx(FFTComplex *z)
+{
+    fft8192_vsx(z);
+    fft4096_vsx(z+8192);
+    fft4096_vsx(z+12288);
+    pass_vsx(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx(FFTComplex *z)
+{
+    fft16384_vsx(z);
+    fft8192_vsx(z+16384);
+    fft8192_vsx(z+24576);
+    pass_vsx(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx(FFTComplex *z)
+{
+    fft32768_vsx(z);
+    fft16384_vsx(z+32768);
+    fft16384_vsx(z+49152);
+    pass_vsx(z,ff_cos_65536,8192);
+}
+
+static void (* const fft_dispatch_vsx[])(FFTComplex*) = {
+    fft4_vsx, fft8_vsx, fft16_vsx, fft32_vsx, fft64_vsx, fft128_vsx, fft256_vsx, fft512_vsx, fft1024_vsx,
+    fft2048_vsx, fft4096_vsx, fft8192_vsx, fft16384_vsx, fft32768_vsx, fft65536_vsx,
+};
+static void (* const fft_dispatch_vsx_interleave[])(FFTComplex*) = {
+    fft4_vsx_interleave, fft8_vsx_interleave, fft16_vsx_interleave, fft32_vsx_interleave, fft64_vsx_interleave,
+    fft128_vsx_interleave, fft256_vsx_interleave, fft512_vsx_interleave, fft1024_vsx_interleave,
+    fft2048_vsx_interleave, fft4096_vsx_interleave, fft8192_vsx_interleave, fft16384_vsx_interleave, fft32768_vsx_interleave, fft65536_vsx_interleave,
+};
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z)
+{
+     fft_dispatch_vsx_interleave[s->nbits-2](z);
+}
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z)
+{
+     fft_dispatch_vsx[s->nbits-2](z);
+}
+#endif /* HAVE_VSX */
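
(Aside, not part of the patch.) The fftN_vsx() and fftN_vsx_interleave() helpers above hand-unroll one and the same split-radix recursion: a size-N transform is one size-N/2 transform on the first half plus two size-N/4 transforms on the remaining quarters, followed by a combining pass over N/8 twiddle factors from ff_cos_N. ff_fft_calc_vsx() then simply indexes fft_dispatch_vsx[s->nbits - 2], so nbits == 5 (a 32-point FFT) selects fft32_vsx. A compact scalar sketch of that structure, with illustrative names standing in for fft8_vsx, fft16_vsx, pass_vsx and the ff_cos_* tables:

    typedef struct { float re, im; } cplx;

    void fft_base8(cplx *z);                              /* cf. fft8_vsx       */
    void fft_base16(cplx *z);                             /* cf. fft16_vsx      */
    void fft_pass(cplx *z, const float *wre, unsigned n); /* cf. pass_vsx       */
    const float *cos_table_for(int n);                    /* cf. ff_cos_32, ... */

    /* Size-n split-radix step, n a power of two >= 8. */
    static void fft_split_radix(cplx *z, int n)
    {
        if (n == 8)  { fft_base8(z);  return; }
        if (n == 16) { fft_base16(z); return; }
        fft_split_radix(z,             n / 2);  /* first half            */
        fft_split_radix(z + n / 2,     n / 4);  /* third quarter         */
        fft_split_radix(z + 3 * n / 4, n / 4);  /* fourth quarter        */
        fft_pass(z, cos_table_for(n), n / 8);   /* combine with twiddles */
    }
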
diff --git a/libavcodec/ppc/fft_vsx.h b/libavcodec/ppc/fft_vsx.h
new file mode 100644
index 0000000..a85475d
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.h
@@ -0,0 +1,830 @@
+#ifndef AVCODEC_PPC_FFT_VSX_H
+#define AVCODEC_PPC_FFT_VSX_H
+/*
+ * FFT  transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan  Copyright (c) 2009 Loren Merritt
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein, and fft_altivec_s.S.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+
+#if HAVE_VSX
+
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z);
+
+
+#define byte_2complex (2*sizeof(FFTComplex))
+#define byte_4complex (4*sizeof(FFTComplex))
+#define byte_6complex (6*sizeof(FFTComplex))
+#define byte_8complex (8*sizeof(FFTComplex))
+#define byte_10complex (10*sizeof(FFTComplex))
+#define byte_12complex (12*sizeof(FFTComplex))
+#define byte_14complex (14*sizeof(FFTComplex))
+
+inline static void pass_vsx_interleave(FFTComplex *z, const FFTSample *wre, unsigned int n)
+{
+    int o1 = n<<1;
+    int o2 = n<<2;
+    int o3 = o1+o2;
+    int i1, i2, i3;
+    FFTSample* out = (FFTSample*)z;
+    const FFTSample *wim = wre+o1;
+    vec_f vz0, vzo1, vzo2, vzo3;
+    vec_f x0, x1, x2, x3;
+    vec_f x4, x5, x6, x7;
+    vec_f x8, x9, x10, x11;
+    vec_f x12, x13, x14, x15;
+    vec_f x16, x17, x18, x19;
+    vec_f x20, x21, x22, x23;
+    vec_f vz0plus1, vzo1plus1, vzo2plus1, vzo3plus1;
+    vec_f y0, y1, y2, y3;
+    vec_f y4, y5, y8, y9;
+    vec_f y10, y13, y14, y15;
+    vec_f y16, y17, y18, y19;
+    vec_f y20, y21, y22, y23;
+    vec_f wr1, wi1, wr0, wi0;
+    vec_f wr2, wi2, wr3, wi3;
+    vec_f xmulwi0, xmulwi1, ymulwi2, ymulwi3;
+
+    n = n-2;
+    i1 = o1*sizeof(FFTComplex);
+    i2 = o2*sizeof(FFTComplex);
+    i3 = o3*sizeof(FFTComplex);
+    vzo2 = vec_ld(i2, &(out[0]));  // zo2.r  zo2.i  z(o2+1).r  z(o2+1).i
+    vzo2plus1 = vec_ld(i2+16, &(out[0]));
+    vzo3 = vec_ld(i3, &(out[0]));  // zo3.r  zo3.i  z(o3+1).r  z(o3+1).i
+    vzo3plus1 = vec_ld(i3+16, &(out[0]));
+    vz0 = vec_ld(0, &(out[0]));    // z0.r  z0.i  z1.r  z1.i
+    vz0plus1 = vec_ld(16, &(out[0]));
+    vzo1 = vec_ld(i1, &(out[0]));  // zo1.r  zo1.i  z(o1+1).r  z(o1+1).i
+    vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+    x0 = vec_add(vzo2, vzo3);
+    x1 = vec_sub(vzo2, vzo3);
+    y0 = vec_add(vzo2plus1, vzo3plus1);
+    y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+    wr1 = vec_splats(wre[1]);
+    wi1 = vec_splats(wim[-1]);
+    wi2 = vec_splats(wim[-2]);
+    wi3 = vec_splats(wim[-3]);
+    wr2 = vec_splats(wre[2]);
+    wr3 = vec_splats(wre[3]);
+
+    x2 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+    x3 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+
+    y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+    y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+    y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+    y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+
+    ymulwi2 = vec_mul(y4, wi2);
+    ymulwi3 = vec_mul(y5, wi3);
+    x4 = vec_mul(x2, wr1);
+    x5 = vec_mul(x3, wi1);
+    y8 = vec_madd(y2, wr2, ymulwi2);
+    y9 = vec_msub(y2, wr2, ymulwi2);
+    x6 = vec_add(x4, x5);
+    x7 = vec_sub(x4, x5);
+    y13 = vec_madd(y3, wr3, ymulwi3);
+    y14 = vec_msub(y3, wr3, ymulwi3);
+
+    x8 = vec_perm(x6, x7, vcprm(0,1,s2,s3));
+    y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+    y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+    x9 = vec_perm(x0, x8, vcprm(0,1,s0,s2));
+    x10 = vec_perm(x1, x8, vcprm(1,0,s3,s1));
+
+    y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+    y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+    x11 = vec_add(vz0, x9);
+    x12 = vec_sub(vz0, x9);
+    x13 = vec_add(vzo1, x10);
+    x14 = vec_sub(vzo1, x10);
+
+    y18 = vec_add(vz0plus1, y16);
+    y19 = vec_sub(vz0plus1, y16);
+    y20 = vec_add(vzo1plus1, y17);
+    y21 = vec_sub(vzo1plus1, y17);
+
+    x15 = vec_perm(x13, x14, vcprm(0,s1,2,s3));
+    x16 = vec_perm(x13, x14, vcprm(s0,1,s2,3));
+    y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+    y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+
+    vec_st(x11, 0, &(out[0]));
+    vec_st(y18, 16, &(out[0]));
+    vec_st(x15, i1, &(out[0]));
+    vec_st(y22, i1+16, &(out[0]));
+    vec_st(x12, i2, &(out[0]));
+    vec_st(y19, i2+16, &(out[0]));
+    vec_st(x16, i3, &(out[0]));
+    vec_st(y23, i3+16, &(out[0]));
+
+    do {
+        out += 8;
+        wre += 4;
+        wim -= 4;
+        wr0 = vec_splats(wre[0]);
+        wr1 = vec_splats(wre[1]);
+        wi0 = vec_splats(wim[0]);
+        wi1 = vec_splats(wim[-1]);
+
+        wr2 = vec_splats(wre[2]);
+        wr3 = vec_splats(wre[3]);
+        wi2 = vec_splats(wim[-2]);
+        wi3 = vec_splats(wim[-3]);
+
+        vzo2 = vec_ld(i2, &(out[0]));  // zo2.r  zo2.i  z(o2+1).r  z(o2+1).i
+        vzo2plus1 = vec_ld(i2+16, &(out[0]));
+        vzo3 = vec_ld(i3, &(out[0]));  // zo3.r  zo3.i  z(o3+1).r  z(o3+1).i
+        vzo3plus1 = vec_ld(i3+16, &(out[0]));
+        vz0 = vec_ld(0, &(out[0]));    // z0.r  z0.i  z1.r  z1.i
+        vz0plus1 = vec_ld(16, &(out[0]));
+        vzo1 = vec_ld(i1, &(out[0])); // zo1.r  zo1.i  z(o1+1).r  z(o1+1).i
+        vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+        x0 = vec_add(vzo2, vzo3);
+        x1 = vec_sub(vzo2, vzo3);
+
+        y0 = vec_add(vzo2plus1, vzo3plus1);
+        y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+        x4 = vec_perm(x0, x1, vcprm(s1,1,s0,0));
+        x5 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+        x2 = vec_perm(x0, x1, vcprm(0,s0,1,s1));
+        x3 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+
+        y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+        y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+        xmulwi0 = vec_mul(x4, wi0);
+        xmulwi1 = vec_mul(x5, wi1);
+
+        y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+        y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+
+        x8 = vec_madd(x2, wr0, xmulwi0);
+        x9 = vec_msub(x2, wr0, xmulwi0);
+        ymulwi2 = vec_mul(y4, wi2);
+        ymulwi3 = vec_mul(y5, wi3);
+
+        x13 = vec_madd(x3, wr1, xmulwi1);
+        x14 = vec_msub(x3, wr1, xmulwi1);
+
+        y8 = vec_madd(y2, wr2, ymulwi2);
+        y9 = vec_msub(y2, wr2, ymulwi2);
+        y13 = vec_madd(y3, wr3, ymulwi3);
+        y14 = vec_msub(y3, wr3, ymulwi3);
+
+        x10 = vec_perm(x8, x9, vcprm(0,1,s2,s3));
+        x15 = vec_perm(x13, x14, vcprm(0,1,s2,s3));
+
+        y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+        y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+        x16 = vec_perm(x10, x15, vcprm(0,2,s0,s2));
+        x17 = vec_perm(x10, x15, vcprm(3,1,s3,s1));
+
+        y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+        y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+        x18 = vec_add(vz0, x16);
+        x19 = vec_sub(vz0, x16);
+        x20 = vec_add(vzo1, x17);
+        x21 = vec_sub(vzo1, x17);
+
+        y18 = vec_add(vz0plus1, y16);
+        y19 = vec_sub(vz0plus1, y16);
+        y20 = vec_add(vzo1plus1, y17);
+        y21 = vec_sub(vzo1plus1, y17);
+
+        x22 = vec_perm(x20, x21, vcprm(0,s1,2,s3));
+        x23 = vec_perm(x20, x21, vcprm(s0,1,s2,3));
+
+        y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+        y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+        vec_st(x18, 0, &(out[0]));
+        vec_st(y18, 16, &(out[0]));
+        vec_st(x22, i1, &(out[0]));
+        vec_st(y22, i1+16, &(out[0]));
+        vec_st(x19, i2, &(out[0]));
+        vec_st(y19, i2+16, &(out[0]));
+        vec_st(x23, i3, &(out[0]));
+        vec_st(y23, i3+16, &(out[0]));
+    } while (n-=2);
+}
+
+inline static void fft2_vsx_interleave(FFTComplex *z)
+{
+    FFTSample r1, i1;
+
+    r1 = z[0].re - z[1].re;
+    z[0].re += z[1].re;
+    z[1].re = r1;
+
+    i1 = z[0].im - z[1].im;
+    z[0].im += z[1].im;
+    z[1].im = i1;
+ }
+
+inline static void fft4_vsx_interleave(FFTComplex *z)
+{
+    vec_f a, b, c, d;
+    float* out=  (float*)z;
+    a = vec_ld(0, &(out[0]));
+    b = vec_ld(byte_2complex, &(out[0]));
+
+    c = vec_perm(a, b, vcprm(0,1,s2,s1));
+    d = vec_perm(a, b, vcprm(2,3,s0,s3));
+    a = vec_add(c, d);
+    b = vec_sub(c, d);
+
+    c = vec_perm(a, b, vcprm(0,1,s0,s1));
+    d = vec_perm(a, b, vcprm(2,3,s3,s2));
+
+    a = vec_add(c, d);
+    b = vec_sub(c, d);
+    vec_st(a, 0, &(out[0]));
+    vec_st(b, byte_2complex, &(out[0]));
+}
+
+inline static void fft8_vsx_interleave(FFTComplex *z)
+{
+    vec_f vz0, vz1, vz2, vz3;
+    vec_f x0, x1, x2, x3;
+    vec_f x4, x5, x6, x7;
+    vec_f x8, x9, x10, x11;
+    vec_f x12, x13, x14, x15;
+    vec_f x16, x17, x18, x19;
+    vec_f x20, x21, x22, x23;
+    vec_f x24, x25, x26, x27;
+    vec_f x28, x29, x30, x31;
+    vec_f x32, x33, x34;
+
+    float* out=  (float*)z;
+    vec_f vc1 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+    vz0 = vec_ld(0, &(out[0]));
+    vz1 = vec_ld(byte_2complex, &(out[0]));
+    vz2 = vec_ld(byte_4complex, &(out[0]));
+    vz3 = vec_ld(byte_6complex, &(out[0]));
+
+    x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+    x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+    x2 = vec_perm(vz2, vz3, vcprm(2,1,s0,s1));
+    x3 = vec_perm(vz2, vz3, vcprm(0,3,s2,s3));
+
+    x4 = vec_add(x0, x1);
+    x5 = vec_sub(x0, x1);
+    x6 = vec_add(x2, x3);
+    x7 = vec_sub(x2, x3);
+
+    x8 = vec_perm(x4, x5, vcprm(0,1,s0,s1));
+    x9 = vec_perm(x4, x5, vcprm(2,3,s3,s2));
+    x10 = vec_perm(x6, x7, vcprm(2,1,s2,s1));
+    x11 = vec_perm(x6, x7, vcprm(0,3,s0,s3));
+
+    x12 = vec_add(x8, x9);
+    x13 = vec_sub(x8, x9);
+    x14 = vec_add(x10, x11);
+    x15 = vec_sub(x10, x11);
+    x16 = vec_perm(x12, x13, vcprm(0,s0,1,s1));
+    x17 = vec_perm(x14, x15, vcprm(0,s0,1,s1));
+    x18 = vec_perm(x16, x17, vcprm(s0,s3,s2,s1));
+    x19 = vec_add(x16, x18); // z0.r  z2.r  z0.i  z2.i
+    x20 = vec_sub(x16, x18); // z4.r  z6.r  z4.i  z6.i
+
+    x21 = vec_perm(x12, x13, vcprm(2,s2,3,s3));
+    x22 = vec_perm(x14, x15, vcprm(2,3,s2,s3));
+    x23 = vec_perm(x14, x15, vcprm(3,2,s3,s2));
+    x24 = vec_add(x22, x23);
+    x25 = vec_sub(x22, x23);
+    x26 = vec_mul( vec_perm(x24, x25, vcprm(2,s2,0,s0)), vc1);
+
+    x27 = vec_add(x21, x26); // z1.r  z7.r z1.i z3.i
+    x28 = vec_sub(x21, x26); //z5.r  z3.r z5.i z7.i
+
+    x29 = vec_perm(x19, x27, vcprm(0,2,s0,s2)); // z0.r  z0.i  z1.r  z1.i
+    x30 = vec_perm(x19, x27, vcprm(1,3,s1,s3)); // z2.r  z2.i  z7.r  z3.i
+    x31 = vec_perm(x20, x28, vcprm(0,2,s0,s2)); // z4.r  z4.i  z5.r  z5.i
+    x32 = vec_perm(x20, x28, vcprm(1,3,s1,s3)); // z6.r  z6.i  z3.r  z7.i
+    x33 = vec_perm(x30, x32, vcprm(0,1,s2,3));  // z2.r  z2.i  z3.r  z3.i
+    x34 = vec_perm(x30, x32, vcprm(s0,s1,2,s3)); // z6.r  z6.i  z7.r  z7.i
+
+    vec_st(x29, 0, &(out[0]));
+    vec_st(x33, byte_2complex, &(out[0]));
+    vec_st(x31, byte_4complex, &(out[0]));
+    vec_st(x34, byte_6complex, &(out[0]));
+}
+
+inline static void fft16_vsx_interleave(FFTComplex *z)
+{
+    float* out=  (float*)z;
+    vec_f vc0 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+    vec_f vc1 = {ff_cos_16[1], ff_cos_16[1], ff_cos_16[1], ff_cos_16[1]};
+    vec_f vc2 = {ff_cos_16[3], ff_cos_16[3], ff_cos_16[3], ff_cos_16[3]};
+    vec_f vz0, vz1, vz2, vz3;
+    vec_f vz4, vz5, vz6, vz7;
+    vec_f x0, x1, x2, x3;
+    vec_f x4, x5, x6, x7;
+    vec_f x8, x9, x10, x11;
+    vec_f x12, x13, x14, x15;
+    vec_f x16, x17, x18, x19;
+    vec_f x20, x21, x22, x23;
+    vec_f x24, x25, x26, x27;
+    vec_f x28, x29, x30, x31;
+    vec_f x32, x33, x34, x35;
+    vec_f x36, x37, x38, x39;
+    vec_f x40, x41, x42, x43;
+    vec_f x44, x45, x46, x47;
+    vec_f x48, x49, x50, x51;
+    vec_f x52, x53, x54, x55;
+    vec_f x56, x57, x58, x59;
+    vec_f x60, x61, x62, x63;
+    vec_f x64, x65, x66, x67;
+    vec_f x68, x69, x70, x71;
+    vec_f x72, x73, x74, x75;
+    vec_f x76, x77, x78, x79;
+    vec_f x80, x81, x82, x83;
+    vec_f x84, x85, x86;
+
+    vz0 = vec_ld(0, &(out[0]));
+    vz1 = vec_ld(byte_2complex, &(out[0]));
+    vz2 = vec_ld(byte_4complex, &(out[0]));
+    vz3 = vec_ld(byte_6complex, &(out[0]));
+    vz4 = vec_ld(byte_8complex, &(out[0]));
+    vz5 = vec_ld(byte_10complex, &(out[0]));
+    vz6 = vec_ld(byte_12complex, &(out[0]));
+    vz7 = vec_ld(byte_14complex, &(out[0]));
+
+    x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+    x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+    x2 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+    x3 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+    x4 = vec_perm(vz4, vz5, vcprm(0,1,s2,s1));
+    x5 = vec_perm(vz4, vz5, vcprm(2,3,s0,s3));
+    x6 = vec_perm(vz6, vz7, vcprm(0,1,s2,s1));
+    x7 = vec_perm(vz6, vz7, vcprm(2,3,s0,s3));
+
+    x8 = vec_add(x0, x1);
+    x9 = vec_sub(x0, x1);
+    x10 = vec_add(x2, x3);
+    x11 = vec_sub(x2, x3);
+
+    x12 = vec_add(x4, x5);
+    x13 = vec_sub(x4, x5);
+    x14 = vec_add(x6, x7);
+    x15 = vec_sub(x6, x7);
+
+    x16 = vec_perm(x8, x9, vcprm(0,1,s0,s1));
+    x17 = vec_perm(x8, x9, vcprm(2,3,s3,s2));
+    x18 = vec_perm(x10, x11, vcprm(2,1,s1,s2));
+    x19 = vec_perm(x10, x11, vcprm(0,3,s0,s3));
+    x20 = vec_perm(x12, x14, vcprm(0,1,s0, s1));
+    x21 = vec_perm(x12, x14, vcprm(2,3,s2,s3));
+    x22 = vec_perm(x13, x15, vcprm(0,1,s0,s1));
+    x23 = vec_perm(x13, x15, vcprm(3,2,s3,s2));
+
+    x24 = vec_add(x16, x17);
+    x25 = vec_sub(x16, x17);
+    x26 = vec_add(x18, x19);
+    x27 = vec_sub(x18, x19);
+    x28 = vec_add(x20, x21);
+    x29 = vec_sub(x20, x21);
+    x30 = vec_add(x22, x23);
+    x31 = vec_sub(x22, x23);
+
+    x32 = vec_add(x24, x26);
+    x33 = vec_sub(x24, x26);
+    x34 = vec_perm(x32, x33, vcprm(0,1,s0,s1));
+
+    x35 = vec_perm(x28, x29, vcprm(2,1,s1,s2));
+    x36 = vec_perm(x28, x29, vcprm(0,3,s0,s3));
+    x37 = vec_add(x35, x36);
+    x38 = vec_sub(x35, x36);
+    x39 = vec_perm(x37, x38, vcprm(0,1,s1,s0));
+
+    x40 = vec_perm(x27, x38, vcprm(3,2,s2,s3));
+    x41 = vec_perm(x26,  x37, vcprm(2,3,s3,s2));
+    x42 = vec_add(x40, x41);
+    x43 = vec_sub(x40, x41);
+    x44 = vec_mul(x42, vc0);
+    x45 = vec_mul(x43, vc0);
+
+    x46 = vec_add(x34, x39);  // z0.r  z0.i  z4.r  z4.i
+    x47 = vec_sub(x34, x39);  // z8.r  z8.i  z12.r  z12.i
+
+    x48 = vec_perm(x30, x31, vcprm(2,1,s1,s2));
+    x49 = vec_perm(x30, x31, vcprm(0,3,s3,s0));
+    x50 = vec_add(x48, x49);
+    x51 = vec_sub(x48, x49);
+    x52 = vec_mul(x50, vc1);
+    x53 = vec_mul(x50, vc2);
+    x54 = vec_mul(x51, vc1);
+    x55 = vec_mul(x51, vc2);
+
+    x56 = vec_perm(x24, x25, vcprm(2,3,s2,s3));
+    x57 = vec_perm(x44, x45, vcprm(0,1,s1,s0));
+    x58 = vec_add(x56, x57);
+    x59 = vec_sub(x56, x57);
+
+    x60 = vec_perm(x54, x55, vcprm(1,0,3,2));
+    x61 = vec_perm(x54, x55, vcprm(s1,s0,s3,s2));
+    x62 = vec_add(x52, x61);
+    x63 = vec_sub(x52, x61);
+    x64 = vec_add(x60, x53);
+    x65 = vec_sub(x60, x53);
+    x66 = vec_perm(x62, x64, vcprm(0,1,s3,s2));
+    x67 = vec_perm(x63, x65, vcprm(s0,s1,3,2));
+
+    x68 = vec_add(x58, x66); // z1.r    z1.i  z3.r    z3.i
+    x69 = vec_sub(x58, x66); // z9.r    z9.i  z11.r  z11.i
+    x70 = vec_add(x59, x67); // z5.r    z5.i  z15.r  z15.i
+    x71 = vec_sub(x59, x67); // z13.r  z13.i z7.r   z7.i
+
+    x72 = vec_perm(x25, x27, vcprm(s1,s0,s2,s3));
+    x73 = vec_add(x25, x72);
+    x74 = vec_sub(x25, x72);
+    x75 = vec_perm(x73, x74, vcprm(0,1,s0,s1));
+    x76 = vec_perm(x44, x45, vcprm(3,2,s2,s3));
+    x77 = vec_add(x75, x76); // z2.r   z2.i    z6.r    z6.i
+    x78 = vec_sub(x75, x76); // z10.r  z10.i  z14.r  z14.i
+
+    x79 = vec_perm(x46, x68, vcprm(0,1,s0,s1)); // z0.r  z0.i  z1.r  z1.i
+    x80 = vec_perm(x77, x68, vcprm(0,1,s2,s3)); // z2.r  z2.i  z3.r  z3.i
+    x81 = vec_perm(x46, x70, vcprm(2,3,s0,s1)); // z4.r  z4.i  z5.r  z5.i
+    x82 = vec_perm(x71, x77, vcprm(s2,s3,2,3)); // z6.r  z6.i  z7.r  z7.i
+    vec_st(x79, 0, &(out[0]));
+    vec_st(x80, byte_2complex, &(out[0]));
+    vec_st(x81, byte_4complex, &(out[0]));
+    vec_st(x82, byte_6complex, &(out[0]));
+    x83 = vec_perm(x47, x69, vcprm(0,1,s0,s1)); // z8.r  z8.i  z9.r  z9.i
+    x84 = vec_perm(x78, x69, vcprm(0,1,s2,s3)); // z10.r  z10.i  z11.r  z11.i
+    x85 = vec_perm(x47, x71, vcprm(2,3,s0,s1)); // z12.r  z12.i  z13.r  z13.i
+    x86 = vec_perm(x70, x78, vcprm(s2,s3,2,3)); // z14.r  z14.i  z15.r  z15.i
+    vec_st(x83, byte_8complex, &(out[0]));
+    vec_st(x84, byte_10complex, &(out[0]));
+    vec_st(x85, byte_12complex, &(out[0]));
+    vec_st(x86, byte_14complex, &(out[0]));
+}
+
+inline static void fft4_vsx(FFTComplex *z)
+{
+    vec_f a, b, c, d;
+    float* out=  (float*)z;
+    a = vec_ld(0, &(out[0]));
+    b = vec_ld(byte_2complex, &(out[0]));
+
+    c = vec_perm(a, b, vcprm(0,1,s2,s1));
+    d = vec_perm(a, b, vcprm(2,3,s0,s3));
+    a = vec_add(c, d);
+    b = vec_sub(c, d);
+
+    c = vec_perm(a,b, vcprm(0,s0,1,s1));
+    d = vec_perm(a, b, vcprm(2,s3,3,s2));
+
+    a = vec_add(c, d);
+    b = vec_sub(c, d);
+
+    c = vec_perm(a, b, vcprm(0,1,s0,s1));
+    d = vec_perm(a, b, vcprm(2,3,s2,s3));
+
+    vec_st(c, 0, &(out[0]));
+    vec_st(d, byte_2complex, &(out[0]));
+    return;
+}
+
+inline static void fft8_vsx(FFTComplex *z)
+{
+    vec_f vz0, vz1, vz2, vz3;
+    vec_f vz4, vz5, vz6, vz7, vz8;
+
+    float* out=  (float*)z;
+    vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+    vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+    vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+    vz0 = vec_ld(0, &(out[0]));
+    vz1 = vec_ld(byte_2complex, &(out[0]));
+    vz2 = vec_ld(byte_4complex, &(out[0]));
+    vz3 = vec_ld(byte_6complex, &(out[0]));
+
+    vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+    vz7 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+    vz2 = vec_add(vz6, vz7);
+    vz3 = vec_sub(vz6, vz7);
+    vz8 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+
+    vz0 = vec_add(vz4, vz5);
+    vz1 = vec_sub(vz4, vz5);
+
+    vz3 = vec_madd(vz3, vc1, vc0);
+    vz3 = vec_madd(vz8, vc2, vz3);
+
+    vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+    vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+    vz6 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+    vz7 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+    vz0 = vec_add(vz4, vz5);
+    vz1 = vec_sub(vz4, vz5);
+    vz2 = vec_add(vz6, vz7);
+    vz3 = vec_sub(vz6, vz7);
+
+    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+    vz6 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+    vz7 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+
+    vz2 = vec_sub(vz4, vz6);
+    vz3 = vec_sub(vz5, vz7);
+
+    vz0 = vec_add(vz4, vz6);
+    vz1 = vec_add(vz5, vz7);
+
+    vec_st(vz0, 0, &(out[0]));
+    vec_st(vz1, byte_2complex, &(out[0]));
+    vec_st(vz2, byte_4complex, &(out[0]));
+    vec_st(vz3, byte_6complex, &(out[0]));
+    return;
+}
+
+inline static void fft16_vsx(FFTComplex *z)
+{
+    float* out=  (float*)z;
+    vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+    vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+    vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+    vec_f vc3 = {1.0, 0.92387953, sqrthalf, 0.38268343};
+    vec_f vc4 = {0.0, 0.38268343, sqrthalf, 0.92387953};
+    vec_f vc5 = {-0.0, -0.38268343, -sqrthalf, -0.92387953};
+
+    vec_f vz0, vz1, vz2, vz3;
+    vec_f vz4, vz5, vz6, vz7;
+    vec_f vz8, vz9, vz10, vz11;
+    vec_f vz12, vz13;
+
+    vz0 = vec_ld(byte_8complex, &(out[0]));
+    vz1 = vec_ld(byte_10complex, &(out[0]));
+    vz2 = vec_ld(byte_12complex, &(out[0]));
+    vz3 = vec_ld(byte_14complex, &(out[0]));
+
+    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+    vz6 = vec_perm(vz2, vz3, vcprm(0,1,s2,s1));
+    vz7 = vec_perm(vz2, vz3, vcprm(2,3,s0,s3));
+
+    vz0 = vec_add(vz4, vz5);
+    vz1= vec_sub(vz4, vz5);
+    vz2 = vec_add(vz6, vz7);
+    vz3 = vec_sub(vz6, vz7);
+
+    vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+    vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+    vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+    vz7 = vec_perm(vz2, vz3, vcprm(2,s3,3,s2));
+
+    vz0 = vec_add(vz4, vz5);
+    vz1 = vec_sub(vz4, vz5);
+    vz2 = vec_add(vz6, vz7);
+    vz3 = vec_sub(vz6, vz7);
+
+    vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+    vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+
+    vz6 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+    vz7 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+    vz0 = vec_ld(0, &(out[0]));
+    vz1 = vec_ld(byte_2complex, &(out[0]));
+    vz2 = vec_ld(byte_4complex, &(out[0]));
+    vz3 = vec_ld(byte_6complex, &(out[0]));
+    vz10 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+    vz11 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+    vz8 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+    vz9 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+    vz2 = vec_add(vz10, vz11);
+    vz3 = vec_sub(vz10, vz11);
+    vz12 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+    vz0 = vec_add(vz8, vz9);
+    vz1 = vec_sub(vz8, vz9);
+
+    vz3 = vec_madd(vz3, vc1, vc0);
+    vz3 = vec_madd(vz12, vc2, vz3);
+    vz8 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+    vz9 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+    vz10 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+    vz11 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+    vz0 = vec_add(vz8, vz9);
+    vz1 = vec_sub(vz8, vz9);
+    vz2 = vec_add(vz10, vz11);
+    vz3 = vec_sub(vz10, vz11);
+
+    vz8 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+    vz9 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+    vz10 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+    vz11 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+    vz2 = vec_sub(vz8, vz10);
+    vz3 = vec_sub(vz9, vz11);
+    vz0 = vec_add(vz8, vz10);
+    vz1 = vec_add(vz9, vz11);
+
+    vz8 = vec_madd(vz4, vc3, vc0);
+    vz9 = vec_madd(vz5, vc3, vc0);
+    vz10 = vec_madd(vz6, vc3, vc0);
+    vz11 = vec_madd(vz7, vc3, vc0);
+
+    vz8 = vec_madd(vz5, vc4, vz8);
+    vz9 = vec_madd(vz4, vc5, vz9);
+    vz10 = vec_madd(vz7, vc5, vz10);
+    vz11 = vec_madd(vz6, vc4, vz11);
+
+    vz12 = vec_sub(vz10, vz8);
+    vz10 = vec_add(vz10, vz8);
+
+    vz13 = vec_sub(vz9, vz11);
+    vz11 = vec_add(vz9, vz11);
+
+    vz4 = vec_sub(vz0, vz10);
+    vz0 = vec_add(vz0, vz10);
+
+    vz7= vec_sub(vz3, vz12);
+    vz3= vec_add(vz3, vz12);
+
+    vz5 = vec_sub(vz1, vz11);
+    vz1 = vec_add(vz1, vz11);
+
+    vz6 = vec_sub(vz2, vz13);
+    vz2 = vec_add(vz2, vz13);
+
+    vec_st(vz0, 0, &(out[0]));
+    vec_st(vz1, byte_2complex, &(out[0]));
+    vec_st(vz2, byte_4complex, &(out[0]));
+    vec_st(vz3, byte_6complex, &(out[0]));
+    vec_st(vz4, byte_8complex, &(out[0]));
+    vec_st(vz5, byte_10complex, &(out[0]));
+    vec_st(vz6, byte_12complex, &(out[0]));
+    vec_st(vz7, byte_14complex, &(out[0]));
+    return;
+
+}
+inline static void pass_vsx(FFTComplex * z, const FFTSample * wre, unsigned int n)
+{
+    int o1 = n<<1;
+    int o2 = n<<2;
+    int o3 = o1+o2;
+    int i1, i2, i3;
+    FFTSample* out = (FFTSample*)z;
+    const FFTSample *wim = wre+o1;
+    vec_f v0, v1, v2, v3;
+    vec_f v4, v5, v6, v7;
+    vec_f v8, v9, v10, v11;
+    vec_f v12, v13;
+
+    n = n-2;
+    i1 = o1*sizeof(FFTComplex);
+    i2 = o2*sizeof(FFTComplex);
+    i3 = o3*sizeof(FFTComplex);
+
+    v8 = vec_ld(0, &(wre[0]));
+    v10 = vec_ld(0, &(wim[0]));
+    v9 = vec_ld(0, &(wim[-4]));
+    v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+    v4 = vec_ld(i2, &(out[0]));
+    v5 = vec_ld(i2+16, &(out[0]));
+    v6 = vec_ld(i3, &(out[0]));
+    v7 = vec_ld(i3+16, &(out[0]));
+    v10 = vec_mul(v4, v8); // r2*wre
+    v11 = vec_mul(v5, v8); // i2*wre
+    v12 = vec_mul(v6, v8); // r3*wre
+    v13 = vec_mul(v7, v8); // i3*wre
+
+    v0 = vec_ld(0, &(out[0])); // r0
+    v3 = vec_ld(i1+16, &(out[0])); // i1
+    v10 = vec_madd(v5, v9, v10); // r2*wim
+    v11 = vec_nmsub(v4, v9, v11); // i2*wim
+    v12 = vec_nmsub(v7, v9, v12); // r3*wim
+    v13 = vec_madd(v6, v9, v13); // i3*wim
+
+    v1 = vec_ld(16, &(out[0])); // i0
+    v2 = vec_ld(i1, &(out[0])); // r1
+    v8 = vec_sub(v12, v10);
+    v12 = vec_add(v12, v10);
+    v9 = vec_sub(v11, v13);
+    v13 = vec_add(v11, v13);
+    v4 = vec_sub(v0, v12);
+    v0 = vec_add(v0, v12);
+    v7 = vec_sub(v3, v8);
+    v3 = vec_add(v3, v8);
+
+    vec_st(v0, 0, &(out[0])); // r0
+    vec_st(v3, i1+16, &(out[0])); // i1
+    vec_st(v4, i2, &(out[0])); // r2
+    vec_st(v7, i3+16, &(out[0]));// i3
+
+    v5 = vec_sub(v1, v13);
+    v1 = vec_add(v1, v13);
+    v6 = vec_sub(v2, v9);
+    v2 = vec_add(v2, v9);
+
+    vec_st(v1, 16, &(out[0])); // i0
+    vec_st(v2, i1, &(out[0])); // r1
+    vec_st(v5, i2+16, &(out[0])); // i2
+    vec_st(v6, i3, &(out[0])); // r3
+
+    do {
+        out += 8;
+        wre += 4;
+        wim -= 4;
+
+        v8 = vec_ld(0, &(wre[0]));
+        v10 = vec_ld(0, &(wim[0]));
+        v9 = vec_ld(0, &(wim[-4]));
+        v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+        v4 = vec_ld(i2, &(out[0])); // r2
+        v5 = vec_ld(i2+16, &(out[0])); // i2
+        v6 = vec_ld(i3, &(out[0])); // r3
+        v7 = vec_ld(i3+16, &(out[0]));// i3
+        v10 = vec_mul(v4, v8); // r2*wre
+        v11 = vec_mul(v5, v8); // i2*wre
+        v12 = vec_mul(v6, v8); // r3*wre
+        v13 = vec_mul(v7, v8); // i3*wre
+
+        v0 = vec_ld(0, &(out[0])); // r0
+        v3 = vec_ld(i1+16, &(out[0])); // i1
+        v10 = vec_madd(v5, v9, v10); // r2*wim
+        v11 = vec_nmsub(v4, v9, v11); // i2*wim
+        v12 = vec_nmsub(v7, v9, v12); // r3*wim
+        v13 = vec_madd(v6, v9, v13); // i3*wim
+
+        v1 = vec_ld(16, &(out[0])); // i0
+        v2 = vec_ld(i1, &(out[0])); // r1
+        v8 = vec_sub(v12, v10);
+        v12 = vec_add(v12, v10);
+        v9 = vec_sub(v11, v13);
+        v13 = vec_add(v11, v13);
+        v4 = vec_sub(v0, v12);
+        v0 = vec_add(v0, v12);
+        v7 = vec_sub(v3, v8);
+        v3 = vec_add(v3, v8);
+
+        vec_st(v0, 0, &(out[0])); // r0
+        vec_st(v3, i1+16, &(out[0])); // i1
+        vec_st(v4, i2, &(out[0])); // r2
+        vec_st(v7, i3+16, &(out[0])); // i3
+
+        v5 = vec_sub(v1, v13);
+        v1 = vec_add(v1, v13);
+        v6 = vec_sub(v2, v9);
+        v2 = vec_add(v2, v9);
+
+        vec_st(v1, 16, &(out[0])); // i0
+        vec_st(v2, i1, &(out[0])); // r1
+        vec_st(v5, i2+16, &(out[0])); // i2
+        vec_st(v6, i3, &(out[0])); // r3
+    } while (n-=2);
+}
+
+#endif
+
+#endif /* AVCODEC_PPC_FFT_VSX_H */
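
(Aside, not part of the patch.) pass_vsx() and pass_vsx_interleave() above each process four butterflies per loop iteration, on split re/im respectively interleaved data, and the r*/i* comments in pass_vsx spell out the per-lane arithmetic (lane k uses the twiddle pair wre[k], wim[-k]). For readability only, a scalar restatement of a single butterfly under those comments' naming (cplx and the parameter layout are illustrative, not FFmpeg API):

    typedef struct { float re, im; } cplx;

    /* One butterfly of the combining pass: z0, z1, z2, z3 are the elements
     * at offsets 0, o1, o2 and o3 of the block being combined. */
    static void pass_one(cplx *z0, cplx *z1, cplx *z2, cplx *z3,
                         float wre, float wim)
    {
        float t1 = z2->re * wre + z2->im * wim;  /* r2*wre + i2*wim */
        float t2 = z2->im * wre - z2->re * wim;  /* i2*wre - r2*wim */
        float t3 = z3->re * wre - z3->im * wim;  /* r3*wre - i3*wim */
        float t4 = z3->im * wre + z3->re * wim;  /* i3*wre + r3*wim */
        float t5 = t3 - t1, t6 = t3 + t1;
        float t7 = t2 - t4, t8 = t2 + t4;

        z2->re = z0->re - t6;  z0->re += t6;     /* new r2, r0 */
        z2->im = z0->im - t8;  z0->im += t8;     /* new i2, i0 */
        z3->re = z1->re - t7;  z1->re += t7;     /* new r3, r1 */
        z3->im = z1->im - t5;  z1->im += t5;     /* new i3, i1 */
    }
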
