/* Convert len int32 samples to float and scale them by mul, 8 per iteration. */
static void int32_to_float_fmul_scalar_altivec(float *dst, const int32_t *src,
                                               float mul, int len)
{
    union {
        vector float v;
        float s[4];
    } mul_u;
    int i;
    vector float src1, src2, dst1, dst2, mul_v, zero;

    zero = (vector float) vec_splat_u32(0);
    mul_u.s[0] = mul;
    mul_v = vec_splat(mul_u.v, 0);

    for (i = 0; i < len; i += 8) {
        src1 = vec_ctf(vec_ld( 0, src + i), 0);
        src2 = vec_ctf(vec_ld(16, src + i), 0);
        dst1 = vec_madd(src1, mul_v, zero);
        dst2 = vec_madd(src2, mul_v, zero);
        vec_st(dst1,  0, dst + i);
        vec_st(dst2, 16, dst + i);
    }
}
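
/*
 * Convert 8 consecutive floats to 8 saturated int16 values: vec_cts() with a
 * scale of 0 converts float -> int32 with saturation, and vec_packs() packs
 * the two int32 vectors down to one int16 vector with signed saturation.
 */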
static vector signed short float_to_int16_one_altivec(const float *src)
{
    vector float s0 = vec_ld( 0, src);
    vector float s1 = vec_ld(16, src);
    vector signed int t0 = vec_cts(s0, 0);
    vector signed int t1 = vec_cts(s1, 0);
    return vec_packs(t0, t1);
}
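
/*
 * Bulk float -> int16 conversion.  When dst is not 16-byte aligned, AltiVec
 * vector stores cannot be used directly, so each iteration does a
 * read-modify-write: load the two aligned vectors that cover the destination,
 * rotate them together with the new data using vec_lvsl()/vec_lvsr() permute
 * masks, and store both halves back.
 */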
static void float_to_int16_altivec(int16_t *dst, const float *src, long len)
{
    int i;
    vector signed short d0, d1, d;
    vector unsigned char align;

    if (((long) dst) & 15) {
        for (i = 0; i < len - 7; i += 8) {
            d0 = vec_ld( 0, dst + i);
            d  = float_to_int16_one_altivec(src + i);
            d1 = vec_ld(15, dst + i);
            d1 = vec_perm(d1, d0, vec_lvsl(0, dst + i));
            align = vec_lvsr(0, dst + i);
            d0 = vec_perm(d1, d, align);
            d1 = vec_perm(d, d1, align);
            vec_st(d0,  0, dst + i);
            vec_st(d1, 15, dst + i);
        }
    } else {
        for (i = 0; i < len - 7; i += 8) {
            d = float_to_int16_one_altivec(src + i);
            vec_st(d, 0, dst + i);
        }
    }
}
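
/*
 * Store one element of a vector to a scalar destination and advance the
 * pointer by inc.  vec_splat() replicates the requested element across the
 * whole vector, so vec_ste() stores the correct value regardless of which
 * lane the destination address selects (dst only needs 2-byte alignment).
 */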
#define VSTE_INC(dst, v, elem, inc) do {            \
        vector signed short s = vec_splat(v, elem); \
        vec_ste(s, 0, dst);                         \
        dst += inc; } while (0)
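
/*
 * Convert 8 floats at a time and scatter the resulting int16 samples with a
 * configurable stride; used below to interleave more than two channels.
 */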
static void float_to_int16_stride_altivec(int16_t *dst, const float *src,
                                          long len, int stride)
{
    int i;
    vector signed short d;

    for (i = 0; i < len - 7; i += 8) {
        d = float_to_int16_one_altivec(src + i);
        VSTE_INC(dst, d, 0, stride);
        VSTE_INC(dst, d, 1, stride);
        VSTE_INC(dst, d, 2, stride);
        VSTE_INC(dst, d, 3, stride);
        VSTE_INC(dst, d, 4, stride);
        VSTE_INC(dst, d, 5, stride);
        VSTE_INC(dst, d, 6, stride);
        VSTE_INC(dst, d, 7, stride);
    }
}
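
/*
 * Interleaved float -> int16 conversion.  Mono falls through to the plain
 * conversion, stereo interleaves two converted vectors with
 * vec_mergeh()/vec_mergel() (using the same unaligned read-modify-write
 * sequence as above when dst is misaligned), and any other channel count
 * falls back to the strided store helper.
 */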
static void float_to_int16_interleave_altivec(int16_t *dst, const float **src,
                                              long len, int channels)
{
    int i;
    vector signed short d0, d1, d2, c0, c1, t0, t1;
    vector unsigned char align;

    if (channels == 1) {
        float_to_int16_altivec(dst, src[0], len);
    } else if (channels == 2) {
        if (((long) dst) & 15) {
            for (i = 0; i < len - 7; i += 8) {
                d0 = vec_ld( 0, dst + i);
                t0 = float_to_int16_one_altivec(src[0] + i);
                d1 = vec_ld(31, dst + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                c0 = vec_mergeh(t0, t1);
                c1 = vec_mergel(t0, t1);
                d2 = vec_perm(d1, d0, vec_lvsl(0, dst + i));
                align = vec_lvsr(0, dst + i);
                d0 = vec_perm(d2, c0, align);
                d1 = vec_perm(c0, c1, align);
                vec_st(d0,  0, dst + i);
                d0 = vec_perm(c1, d2, align);
                vec_st(d1, 15, dst + i);
                vec_st(d0, 31, dst + i);
                dst += 8;
            }
        } else {
            for (i = 0; i < len - 7; i += 8) {
                t0 = float_to_int16_one_altivec(src[0] + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                d0 = vec_mergeh(t0, t1);
                d1 = vec_mergel(t0, t1);
                vec_st(d0,  0, dst + i);
                vec_st(d1, 16, dst + i);
                dst += 8;
            }
        }
    } else {
        for (i = 0; i < channels; i++)
            float_to_int16_stride_altivec(dst + i, src[i], len, channels);
    }
}
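
/*
 * Sketch only (an assumption, not part of this section): in FFmpeg these
 * routines are normally registered through the FmtConvertContext
 * function-pointer table, roughly as below.  Field and flag names vary
 * between FFmpeg versions; the float->int16 paths are typically skipped when
 * bit-exact output is requested, since vec_cts() truncates while the C
 * reference rounds to nearest.
 *
 *   void ff_fmt_convert_init_altivec(FmtConvertContext *c, AVCodecContext *avctx)
 *   {
 *       c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec;
 *       if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
 *           c->float_to_int16            = float_to_int16_altivec;
 *           c->float_to_int16_interleave = float_to_int16_interleave_altivec;
 *       }
 *   }
 */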