#ifndef AVUTIL_MIPS_MMIUTILS_H
#define AVUTIL_MIPS_MMIUTILS_H

#include "config.h"
#include "libavutil/mips/asmdefs.h"

#if HAVE_LOONGSON2

/*
 * DECLARE_VAR_* declare the C-side scratch variables that the MMI_* macros
 * below clobber from inline asm; the matching RESTRICT_ASM_* fragments add
 * the corresponding output-operand constraints (note the trailing comma,
 * which lets them be pasted directly in front of the caller's own outputs).
 */
#define DECLARE_VAR_LOW32       int32_t low32
#define RESTRICT_ASM_LOW32      [low32]"=&r"(low32),
#define DECLARE_VAR_ALL64       int64_t all64
#define RESTRICT_ASM_ALL64      [all64]"=&r"(all64),
#define DECLARE_VAR_ADDRT       mips_reg addrt
#define RESTRICT_ASM_ADDRT      [addrt]"=&r"(addrt),
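/*
 * Minimal usage sketch (hypothetical function and operand names, not part
 * of this header): the DECLARE/RESTRICT pairs are meant to bracket an
 * inline-asm statement like so:
 *
 *     static void copy8(uint8_t *dst, const uint8_t *src)
 *     {
 *         double ftmp0;
 *         DECLARE_VAR_ALL64;
 *
 *         __asm__ volatile (
 *             MMI_ULDC1(%[ftmp0], %[src], 0x00)
 *             MMI_SDC1(%[ftmp0], %[dst], 0x00)
 *             : RESTRICT_ASM_ALL64
 *               [ftmp0]"=&f"(ftmp0)
 *             : [src]"r"(src), [dst]"r"(dst)
 *             : "memory"
 *         );
 *     }
 */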
/*
 * Indexed (base + index + offset) loads and stores.  This path avoids the
 * Loongson-specific gs* indexed instructions and instead forms the address
 * explicitly in the addrt scratch register.
 */
#define MMI_LWX(reg, addr, stride, bias)                                    \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    "lw         "#reg",     "#bias"(%[addrt])       \n\t"

#define MMI_SWX(reg, addr, stride, bias)                                    \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    "sw         "#reg",     "#bias"(%[addrt])       \n\t"

#define MMI_LDX(reg, addr, stride, bias)                                    \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    "ld         "#reg",     "#bias"(%[addrt])       \n\t"

#define MMI_SDX(reg, addr, stride, bias)                                    \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    "sd         "#reg",     "#bias"(%[addrt])       \n\t"
/* 32-bit loads/stores to/from FP registers.  The U* variants tolerate
 * unaligned addresses by bouncing through the low32 GPR scratch. */
#define MMI_LWC1(fp, addr, bias)                                            \
    "lwc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_ULWC1(fp, addr, bias)                                           \
    "ulw        %[low32],   "#bias"("#addr")        \n\t"                   \
    "mtc1       %[low32],   "#fp"                   \n\t"

#define MMI_LWXC1(fp, addr, stride, bias)                                   \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    MMI_LWC1(fp, %[addrt], bias)

#define MMI_SWC1(fp, addr, bias)                                            \
    "swc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_USWC1(fp, addr, bias)                                           \
    "mfc1       %[low32],   "#fp"                   \n\t"                   \
    "usw        %[low32],   "#bias"("#addr")        \n\t"

#define MMI_SWXC1(fp, addr, stride, bias)                                   \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    MMI_SWC1(fp, %[addrt], bias)
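/*
 * Sketch of what MMI_ULWC1/MMI_USWC1 achieve (hypothetical C equivalent):
 * an unaligned 32-bit memory access, as if by
 *
 *     uint32_t low32;
 *     memcpy(&low32, addr + bias, sizeof(low32));   // MMI_ULWC1
 *     memcpy(addr + bias, &low32, sizeof(low32));   // MMI_USWC1
 *
 * with low32 then moved to/from the FP register via mtc1/mfc1.
 */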
/* 64-bit counterparts; the U* variants go through the all64 GPR scratch
 * (uld/usd plus dmtc1/dmfc1), so they require 64-bit GPRs. */
#define MMI_LDC1(fp, addr, bias)                                            \
    "ldc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_ULDC1(fp, addr, bias)                                           \
    "uld        %[all64],   "#bias"("#addr")        \n\t"                   \
    "dmtc1      %[all64],   "#fp"                   \n\t"

#define MMI_LDXC1(fp, addr, stride, bias)                                   \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    MMI_LDC1(fp, %[addrt], bias)

#define MMI_SDC1(fp, addr, bias)                                            \
    "sdc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_USDC1(fp, addr, bias)                                           \
    "dmfc1      %[all64],   "#fp"                   \n\t"                   \
    "usd        %[all64],   "#bias"("#addr")        \n\t"

#define MMI_SDXC1(fp, addr, stride, bias)                                   \
    PTR_ADDU   "%[addrt],   "#addr",    "#stride"   \n\t"                   \
    MMI_SDC1(fp, %[addrt], bias)
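/*
 * Note (sketch): every use of these macros must be paired with the matching
 * scratch declaration and constraint in the enclosing function, e.g.
 *
 *     DECLARE_VAR_ALL64;          // for MMI_ULDC1 / MMI_USDC1
 *     DECLARE_VAR_ADDRT;          // for any MMI_*X / MMI_*XC1
 *     __asm__ volatile (
 *         MMI_ULDC1(%[ftmp0], %[src], 0x08)
 *         : RESTRICT_ASM_ALL64
 *           [ftmp0]"=&f"(ftmp0)
 *         : [src]"r"(src)
 *     );
 *
 * Without the RESTRICT_ASM_* constraint, the %[all64]/%[addrt] operands in
 * the template do not resolve and the asm statement fails to compile.
 */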
/* 128-bit (quadword) accesses, done as two 64-bit halves at bias and
 * bias+8. */
#define MMI_LQ(reg1, reg2, addr, bias)                                      \
    "ld         "#reg1",    "#bias"("#addr")        \n\t"                   \
    "ld         "#reg2",  8+"#bias"("#addr")        \n\t"

#define MMI_SQ(reg1, reg2, addr, bias)                                      \
    "sd         "#reg1",    "#bias"("#addr")        \n\t"                   \
    "sd         "#reg2",  8+"#bias"("#addr")        \n\t"

#define MMI_LQC1(fp1, fp2, addr, bias)                                      \
    "ldc1       "#fp1",     "#bias"("#addr")        \n\t"                   \
    "ldc1       "#fp2",   8+"#bias"("#addr")        \n\t"

#define MMI_SQC1(fp1, fp2, addr, bias)                                      \
    "sdc1       "#fp1",     "#bias"("#addr")        \n\t"                   \
    "sdc1       "#fp2",   8+"#bias"("#addr")        \n\t"
#else /* !HAVE_LOONGSON2 */

/* On this path the same macro names map to single-instruction Loongson gs*
 * forms, so no addrt scratch register is needed. */
#define DECLARE_VAR_ALL64
#define RESTRICT_ASM_ALL64
#define DECLARE_VAR_ADDRT
#define RESTRICT_ASM_ADDRT

#define MMI_LWX(reg, addr, stride, bias)                                    \
    "gslwx      "#reg",     "#bias"("#addr", "#stride")     \n\t"

#define MMI_SWX(reg, addr, stride, bias)                                    \
    "gsswx      "#reg",     "#bias"("#addr", "#stride")     \n\t"

#define MMI_LDX(reg, addr, stride, bias)                                    \
    "gsldx      "#reg",     "#bias"("#addr", "#stride")     \n\t"

#define MMI_SDX(reg, addr, stride, bias)                                    \
    "gssdx      "#reg",     "#bias"("#addr", "#stride")     \n\t"

#define MMI_LWC1(fp, addr, bias)                                            \
    "lwc1       "#fp",      "#bias"("#addr")        \n\t"
#if _MIPS_SIM == _ABIO32

#define DECLARE_VAR_LOW32       int32_t low32
#define RESTRICT_ASM_LOW32      [low32]"=&r"(low32),

/* Under the O32 ABI, fall back to ulw + mtc1 for unaligned 32-bit loads. */
#define MMI_ULWC1(fp, addr, bias)                                           \
    "ulw        %[low32],   "#bias"("#addr")        \n\t"                   \
    "mtc1       %[low32],   "#fp"                   \n\t"

#else /* _MIPS_SIM != _ABIO32 */

#define DECLARE_VAR_LOW32
#define RESTRICT_ASM_LOW32

#define MMI_ULWC1(fp, addr, bias)                                           \
    "gslwlc1    "#fp",    3+"#bias"("#addr")        \n\t"                   \
    "gslwrc1    "#fp",      "#bias"("#addr")        \n\t"

#endif /* _MIPS_SIM == _ABIO32 */
#define MMI_LWXC1(fp, addr, stride, bias)                                   \
    "gslwxc1    "#fp",      "#bias"("#addr", "#stride")     \n\t"

#define MMI_SWC1(fp, addr, bias)                                            \
    "swc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_USWC1(fp, addr, bias)                                           \
    "gsswlc1    "#fp",    3+"#bias"("#addr")        \n\t"                   \
    "gsswrc1    "#fp",      "#bias"("#addr")        \n\t"

#define MMI_SWXC1(fp, addr, stride, bias)                                   \
    "gsswxc1    "#fp",      "#bias"("#addr", "#stride")     \n\t"

#define MMI_LDC1(fp, addr, bias)                                            \
    "ldc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_ULDC1(fp, addr, bias)                                           \
    "gsldlc1    "#fp",    7+"#bias"("#addr")        \n\t"                   \
    "gsldrc1    "#fp",      "#bias"("#addr")        \n\t"

#define MMI_LDXC1(fp, addr, stride, bias)                                   \
    "gsldxc1    "#fp",      "#bias"("#addr", "#stride")     \n\t"

#define MMI_SDC1(fp, addr, bias)                                            \
    "sdc1       "#fp",      "#bias"("#addr")        \n\t"

#define MMI_USDC1(fp, addr, bias)                                           \
    "gssdlc1    "#fp",    7+"#bias"("#addr")        \n\t"                   \
    "gssdrc1    "#fp",      "#bias"("#addr")        \n\t"

#define MMI_SDXC1(fp, addr, stride, bias)                                   \
    "gssdxc1    "#fp",      "#bias"("#addr", "#stride")     \n\t"

#define MMI_LQ(reg1, reg2, addr, bias)                                      \
    "gslq       "#reg1",    "#reg2",    "#bias"("#addr")    \n\t"

#define MMI_SQ(reg1, reg2, addr, bias)                                      \
    "gssq       "#reg1",    "#reg2",    "#bias"("#addr")    \n\t"

#define MMI_LQC1(fp1, fp2, addr, bias)                                      \
    "gslqc1     "#fp1",     "#fp2",     "#bias"("#addr")    \n\t"

#define MMI_SQC1(fp1, fp2, addr, bias)                                      \
    "gssqc1     "#fp1",     "#fp2",     "#bias"("#addr")    \n\t"

#endif /* HAVE_LOONGSON2 */
/**
 * Back up the callee-saved FP registers that the MMI code is about to
 * clobber; which registers are callee-saved depends on the ABI.
 * (LOCAL_ALIGNED_16() is libavutil's aligned stack-array helper.)
 */
#define BACKUP_REG \
    LOCAL_ALIGNED_16(double, temp_backup_reg, [8]);                         \
    if (_MIPS_SIM == _ABI64)                                                \
        __asm__ volatile (                                                  \
            "gssqc1     $f25,       $f24,       0x00(%[temp])   \n\t"       \
            "gssqc1     $f27,       $f26,       0x10(%[temp])   \n\t"       \
            "gssqc1     $f29,       $f28,       0x20(%[temp])   \n\t"       \
            "gssqc1     $f31,       $f30,       0x30(%[temp])   \n\t"       \
            :                                                               \
            : [temp]"r"(temp_backup_reg)                                    \
            : "memory"                                                      \
        );                                                                  \
    else                                                                    \
        __asm__ volatile (                                                  \
            "gssqc1     $f22,       $f20,       0x00(%[temp])   \n\t"       \
            "gssqc1     $f26,       $f24,       0x10(%[temp])   \n\t"       \
            "gssqc1     $f30,       $f28,       0x20(%[temp])   \n\t"       \
            :                                                               \
            : [temp]"r"(temp_backup_reg)                                    \
            : "memory"                                                      \
        );
/**
 * Restore the FP registers saved by BACKUP_REG; must run in the same scope,
 * while temp_backup_reg is still live.
 */
#define RECOVER_REG \
    if (_MIPS_SIM == _ABI64)                                                \
        __asm__ volatile (                                                  \
            "gslqc1     $f25,       $f24,       0x00(%[temp])   \n\t"       \
            "gslqc1     $f27,       $f26,       0x10(%[temp])   \n\t"       \
            "gslqc1     $f29,       $f28,       0x20(%[temp])   \n\t"       \
            "gslqc1     $f31,       $f30,       0x30(%[temp])   \n\t"       \
            :                                                               \
            : [temp]"r"(temp_backup_reg)                                    \
        );                                                                  \
    else                                                                    \
        __asm__ volatile (                                                  \
            "gslqc1     $f22,       $f20,       0x00(%[temp])   \n\t"       \
            "gslqc1     $f26,       $f24,       0x10(%[temp])   \n\t"       \
            "gslqc1     $f30,       $f28,       0x20(%[temp])   \n\t"       \
            :                                                               \
            : [temp]"r"(temp_backup_reg)                                    \
        );
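/*
 * Usage sketch (hypothetical): bracket any function whose asm clobbers
 * callee-saved FPRs:
 *
 *     void ff_some_mmi_func(...)
 *     {
 *         BACKUP_REG
 *         __asm__ volatile ( ... may use $f20..$f31 freely ... );
 *         RECOVER_REG
 *     }
 */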
/**
 * brief: Transpose 2X2 word packaged data.
 * fr_i0, fr_i1: src
 * fr_o0, fr_o1: dst
 */
#define TRANSPOSE_2W(fr_i0, fr_i1, fr_o0, fr_o1)                            \
    "punpcklwd  "#fr_o0",   "#fr_i0",   "#fr_i1"    \n\t"                   \
    "punpckhwd  "#fr_o1",   "#fr_i0",   "#fr_i1"    \n\t"
/**
 * brief: Transpose 4X4 half word packaged data.
 * fr_i0, fr_i1, fr_i2, fr_i3: src & dst
 * fr_t0, fr_t1, fr_t2, fr_t3: temporary registers
 */
#define TRANSPOSE_4H(fr_i0, fr_i1, fr_i2, fr_i3,                            \
                     fr_t0, fr_t1, fr_t2, fr_t3)                            \
    "punpcklhw  "#fr_t0",   "#fr_i0",   "#fr_i1"    \n\t"                   \
    "punpckhhw  "#fr_t1",   "#fr_i0",   "#fr_i1"    \n\t"                   \
    "punpcklhw  "#fr_t2",   "#fr_i2",   "#fr_i3"    \n\t"                   \
    "punpckhhw  "#fr_t3",   "#fr_i2",   "#fr_i3"    \n\t"                   \
    "punpcklwd  "#fr_i0",   "#fr_t0",   "#fr_t2"    \n\t"                   \
    "punpckhwd  "#fr_i1",   "#fr_t0",   "#fr_t2"    \n\t"                   \
    "punpcklwd  "#fr_i2",   "#fr_t1",   "#fr_t3"    \n\t"                   \
    "punpckhwd  "#fr_i3",   "#fr_t1",   "#fr_t3"    \n\t"
/**
 * brief: Transpose 8X8 byte packaged data.
 * fr_i0 ~ fr_i7: src & dst
 * fr_t0 ~ fr_t3: temporary registers
 */
#define TRANSPOSE_8B(fr_i0, fr_i1, fr_i2, fr_i3, fr_i4, fr_i5,              \
                     fr_i6, fr_i7, fr_t0, fr_t1, fr_t2, fr_t3)              \
    "punpcklbh  "#fr_t0",   "#fr_i0",   "#fr_i1"    \n\t"                   \
    "punpckhbh  "#fr_t1",   "#fr_i0",   "#fr_i1"    \n\t"                   \
    "punpcklbh  "#fr_t2",   "#fr_i2",   "#fr_i3"    \n\t"                   \
    "punpckhbh  "#fr_t3",   "#fr_i2",   "#fr_i3"    \n\t"                   \
    "punpcklbh  "#fr_i0",   "#fr_i4",   "#fr_i5"    \n\t"                   \
    "punpckhbh  "#fr_i1",   "#fr_i4",   "#fr_i5"    \n\t"                   \
    "punpcklbh  "#fr_i2",   "#fr_i6",   "#fr_i7"    \n\t"                   \
    "punpckhbh  "#fr_i3",   "#fr_i6",   "#fr_i7"    \n\t"                   \
    "punpcklhw  "#fr_i4",   "#fr_t0",   "#fr_t2"    \n\t"                   \
    "punpckhhw  "#fr_i5",   "#fr_t0",   "#fr_t2"    \n\t"                   \
    "punpcklhw  "#fr_i6",   "#fr_t1",   "#fr_t3"    \n\t"                   \
    "punpckhhw  "#fr_i7",   "#fr_t1",   "#fr_t3"    \n\t"                   \
    "punpcklhw  "#fr_t0",   "#fr_i0",   "#fr_i2"    \n\t"                   \
    "punpckhhw  "#fr_t1",   "#fr_i0",   "#fr_i2"    \n\t"                   \
    "punpcklhw  "#fr_t2",   "#fr_i1",   "#fr_i3"    \n\t"                   \
    "punpckhhw  "#fr_t3",   "#fr_i1",   "#fr_i3"    \n\t"                   \
    "punpcklwd  "#fr_i0",   "#fr_i4",   "#fr_t0"    \n\t"                   \
    "punpckhwd  "#fr_i1",   "#fr_i4",   "#fr_t0"    \n\t"                   \
    "punpcklwd  "#fr_i2",   "#fr_i5",   "#fr_t1"    \n\t"                   \
    "punpckhwd  "#fr_i3",   "#fr_i5",   "#fr_t1"    \n\t"                   \
    "punpcklwd  "#fr_i4",   "#fr_i6",   "#fr_t2"    \n\t"                   \
    "punpckhwd  "#fr_i5",   "#fr_i6",   "#fr_t2"    \n\t"                   \
    "punpcklwd  "#fr_i6",   "#fr_i7",   "#fr_t3"    \n\t"                   \
    "punpckhwd  "#fr_i7",   "#fr_i7",   "#fr_t3"    \n\t"
/**
 * brief: Parallel arithmetic shift right of 8 packed bytes.
 * fr_i0: src
 * fr_i1: shift count (applied to 16-bit lanes; the source byte sits in the
 *        high half of each lane, so a byte shift of n needs n+8 here)
 * fr_t0, fr_t1: temporary registers
 * fr_d0: dst
 */
#define PSRAB_MMI(fr_i0, fr_i1, fr_t0, fr_t1, fr_d0)                        \
    "punpcklbh  "#fr_t0",   "#fr_t0",   "#fr_i0"    \n\t"                   \
    "punpckhbh  "#fr_t1",   "#fr_t1",   "#fr_i0"    \n\t"                   \
    "psrah      "#fr_t0",   "#fr_t0",   "#fr_i1"    \n\t"                   \
    "psrah      "#fr_t1",   "#fr_t1",   "#fr_i1"    \n\t"                   \
    "packsshb   "#fr_d0",   "#fr_t0",   "#fr_t1"    \n\t"

/**
 * brief: Parallel logical shift right of 8 packed bytes.
 * fr_i0: src
 * fr_i1: shift count (see PSRAB_MMI; n+8 for a byte shift of n)
 * fr_t0, fr_t1: temporary registers
 * fr_d0: dst
 */
#define PSRLB_MMI(fr_i0, fr_i1, fr_t0, fr_t1, fr_d0)                        \
    "punpcklbh  "#fr_t0",   "#fr_t0",   "#fr_i0"    \n\t"                   \
    "punpckhbh  "#fr_t1",   "#fr_t1",   "#fr_i0"    \n\t"                   \
    "psrlh      "#fr_t0",   "#fr_t0",   "#fr_i1"    \n\t"                   \
    "psrlh      "#fr_t1",   "#fr_t1",   "#fr_i1"    \n\t"                   \
    "packsshb   "#fr_d0",   "#fr_t0",   "#fr_t1"    \n\t"
#define PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift)                              \
    "psrah      "#fp1",     "#fp1",     "#shift"    \n\t"                   \
    "psrah      "#fp2",     "#fp2",     "#shift"    \n\t"                   \
    "psrah      "#fp3",     "#fp3",     "#shift"    \n\t"                   \
    "psrah      "#fp4",     "#fp4",     "#shift"    \n\t"

#define PSRAH_8_MMI(fp1, fp2, fp3, fp4, fp5, fp6, fp7, fp8, shift)          \
    PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift)                                  \
    PSRAH_4_MMI(fp5, fp6, fp7, fp8, shift)
/**
 * brief: rounding shift, (((value) + (1 << ((n) - 1))) >> (n)), per 32-bit
 * lane.
 * fr_i0: src & dst (value)
 * fr_i1: operand n
 * fr_t0, fr_t1: temporary FPRs
 * gr_t0: temporary GPR
 */
#define ROUND_POWER_OF_TWO_MMI(fr_i0, fr_i1, fr_t0, fr_t1, gr_t0)           \
    /* fr_t0 = {1, 1} */                                                    \
    "li         "#gr_t0",   0x01                    \n\t"                   \
    "dmtc1      "#gr_t0",   "#fr_t0"                \n\t"                   \
    "punpcklwd  "#fr_t0",   "#fr_t0",   "#fr_t0"    \n\t"                   \
    /* fr_t1 = n - 1, then 1 << (n - 1) */                                  \
    "psubw      "#fr_t1",   "#fr_i1",   "#fr_t0"    \n\t"                   \
    "psllw      "#fr_t1",   "#fr_t0",   "#fr_t1"    \n\t"                   \
    /* fr_i0 = (value + (1 << (n - 1))) >> n */                             \
    "paddw      "#fr_i0",   "#fr_i0",   "#fr_t1"    \n\t"                   \
    "psraw      "#fr_i0",   "#fr_i0",   "#fr_i1"    \n\t"

#endif /* AVUTIL_MIPS_MMIUTILS_H */