#define BITS_INV_ACC    5                           // 4 or 5 for IEEE
#define SHIFT_INV_ROW   (16 - BITS_INV_ACC)         // 11
#define SHIFT_INV_COL   (1 + BITS_INV_ACC)          // 6
#define RND_INV_ROW     (1024 * (6 - BITS_INV_ACC))
#define RND_INV_COL     (16 * (BITS_INV_ACC - 3))
#define RND_INV_CORR    (RND_INV_COL - 1)

#define BITS_FRW_ACC    3                           // 2 or 3 for accuracy
#define SHIFT_FRW_COL   BITS_FRW_ACC
#define SHIFT_FRW_ROW   (BITS_FRW_ACC + 17)
#define RND_FRW_ROW     (262144 * (BITS_FRW_ACC - 1))
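
These shift/rounder pairs implement round-to-nearest fixed point: for either permitted accuracy setting, each additive rounder works out to exactly half an LSB of its corresponding right shift. A compile-time check (mine, not part of the original source) makes the relationship explicit:

#if RND_INV_ROW != (1 << (SHIFT_INV_ROW - 1)) || \
    RND_INV_COL != (1 << (SHIFT_INV_COL - 1)) || \
    RND_FRW_ROW != (1 << (SHIFT_FRW_ROW - 1))
#error "rounder is not half an LSB of its shift"
#endif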
DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4 * 4] = {
     13036,  13036,  13036,  13036,    // tan(1*pi/16) * (1 << 16)
     27146,  27146,  27146,  27146,    // tan(2*pi/16) * (1 << 16)
    -21746, -21746, -21746, -21746,    // tan(3*pi/16) * (1 << 16), wraps in int16_t
     23170,  23170,  23170,  23170     // cos(4*pi/16) * (1 << 15)
};
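
The four rows are broadcast copies of tan(1*pi/16), tan(2*pi/16) and tan(3*pi/16) in Q16 (the third wraps negative when truncated to int16_t) plus cos(pi/4) in Q15. A hypothetical generator (not in FFmpeg) that reproduces the values:

#include <math.h>
#include <stdint.h>

/* Fills out[] with 13036, 27146, -21746, 23170. */
static void gen_tg_1_16(int16_t out[4])
{
    for (int k = 1; k <= 3; k++)
        out[k - 1] = (int16_t)lrint(tan(k * M_PI / 16.0) * 65536.0);
    out[3] = (int16_t)lrint(cos(M_PI / 4.0) * 32768.0);
}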

DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmi)[32 * 4] = {
    // rows 0 and 4 - constants scaled by cos_4_16
     16384,  21407,  16384,   8867,
     16384,   8867, -16384, -21407,
     16384,  -8867,  16384, -21407,
    -16384,  21407,  16384,  -8867,
     22725,  19266,  19266,  -4520,
     12873,   4520, -22725, -12873,
     12873, -22725,   4520, -12873,
      4520,  19266,  19266, -22725,

    // rows 1 and 7 - constants scaled by cos_1_16
     22725,  29692,  22725,  12299,
     22725,  12299, -22725, -29692,
     22725, -12299,  22725, -29692,
    -22725,  29692,  22725, -12299,
     31521,  26722,  26722,  -6270,
     17855,   6270, -31521, -17855,
     17855, -31521,   6270, -17855,
      6270,  26722,  26722, -31521,

    // rows 2 and 6 - constants scaled by cos_2_16
     21407,  27969,  21407,  11585,
     21407,  11585, -21407, -27969,
     21407, -11585,  21407, -27969,
    -21407,  27969,  21407, -11585,
     29692,  25172,  25172,  -5906,
     16819,   5906, -29692, -16819,
     16819, -29692,   5906, -16819,
      5906,  25172,  25172, -29692,

    // rows 3 and 5 - constants scaled by cos_3_16
     19266,  25172,  19266,  10426,
     19266,  10426, -19266, -25172,
     19266, -10426,  19266, -25172,
    -19266,  25172,  19266, -10426,
     26722,  22654,  22654,  -5315,
     15137,   5315, -26722, -15137,
     15137, -26722,   5315, -15137,
      5315,  22654,  22654, -26722
};
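
The table is four 8-row groups, one per row pair (0/4, 1/7, 2/6, 3/5) of the 8x8 block; each group is the rows-0/4 table rescaled by cos(r*pi/16) / cos(4*pi/16). A numeric spot check of the groups' leading weights (my sketch, assuming the usual AP-922 layout):

#include <math.h>

/* Returns 16384, 22725, 21407, 19266 for r = 4, 1, 2, 3 - the first
 * entry of each 8-row group above. */
static long leading_weight(int r)
{
    double base = sqrt(2.0) * cos(4 * M_PI / 16.0) * 16384.0; /* = 16384.0 */
    return lrint(base * cos(r * M_PI / 16.0) / cos(4 * M_PI / 16.0));
}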

#define DCT_8_INV_ROW_MMI(A1, A2, A3, A4)                                   \
    "dli $10, 0x88                \n\t"                                     \
    "ldc1 $f4, "#A1"              \n\t" /* x3 x2 x1 x0 */                   \
    "dmtc1 $10, $f16              \n\t" /* shuffle mask */                  \
    "ldc1 $f10, 8+"#A1"           \n\t" /* x7 x6 x5 x4 */                   \
    "ldc1 $f6, "#A3"              \n\t" /* w05 w04 w01 w00 */               \
    "pshufh $f0, $f4, $f16        \n\t" /* x2 x0 x2 x0 */                   \
    "ldc1 $f8, 8+"#A3"            \n\t" /* w07 w06 w03 w02 */               \
    "ldc1 $f12, 32+"#A3"          \n\t" /* w21 w20 w17 w16 */               \
    "pmaddhw $f6, $f6, $f0        \n\t" /* x2*w05+x0*w04 x2*w01+x0*w00 */   \
    "dli $10, 0xdd                \n\t"                                     \
    "pshufh $f2, $f10, $f16       \n\t" /* x6 x4 x6 x4 */                   \
    "dmtc1 $10, $f16              \n\t" /* shuffle mask */                  \
    "pmaddhw $f8, $f8, $f2        \n\t" /* x6*w07+x4*w06 x6*w03+x4*w02 */   \
    "ldc1 $f14, 40+"#A3"          \n\t" /* w23 w22 w19 w18 */               \
    "pshufh $f4, $f4, $f16        \n\t" /* x3 x1 x3 x1 */                   \
    "pmaddhw $f12, $f12, $f4      \n\t" /* x3*w21+x1*w20 x3*w17+x1*w16 */   \
    "pshufh $f10, $f10, $f16      \n\t" /* x7 x5 x7 x5 */                   \
    "ldc1 $f18, "#A4"             \n\t" /* rounder */                       \
    "pmaddhw $f14, $f14, $f10     \n\t" /* x7*w23+x5*w22 x7*w19+x5*w18 */   \
    "paddw $f6, $f6, $f18         \n\t" /* a1 a0 += rounder */              \
    "ldc1 $f16, 16+"#A3"          \n\t" /* w13 w12 w09 w08 */               \
    "pmaddhw $f0, $f0, $f16       \n\t" /* x2*w13+x0*w12 x2*w09+x0*w08 */   \
    "ldc1 $f16, 24+"#A3"          \n\t" /* w15 w14 w11 w10 */               \
    "paddw $f6, $f6, $f8          \n\t" /* a1 a0 */                         \
    "pmaddhw $f2, $f2, $f16       \n\t" /* x6*w15+x4*w14 x6*w11+x4*w10 */   \
    "ldc1 $f16, 48+"#A3"          \n\t" /* w29 w28 w25 w24 */               \
    "pmaddhw $f4, $f4, $f16       \n\t" /* x3*w29+x1*w28 x3*w25+x1*w24 */   \
    "ldc1 $f16, 56+"#A3"          \n\t" /* w31 w30 w27 w26 */               \
    "paddw $f12, $f12, $f14       \n\t" /* b1 b0 */                         \
    "dli $10, 11                  \n\t" /* SHIFT_INV_ROW */                 \
    "pmaddhw $f10, $f10, $f16     \n\t" /* x7*w31+x5*w30 x7*w27+x5*w26 */   \
    "dmtc1 $10, $f16              \n\t"                                     \
    "psubw $f8, $f6, $f12         \n\t" /* y6 y7 */                         \
    "paddw $f6, $f6, $f12         \n\t" /* y1 y0 */                         \
    "paddw $f0, $f0, $f18         \n\t" /* a3 a2 += rounder */              \
    "psraw $f6, $f6, $f16         \n\t" /* y1=a1+b1 y0=a0+b0 */             \
    "paddw $f0, $f0, $f2          \n\t" /* a3 a2 */                         \
    "paddw $f4, $f4, $f10         \n\t" /* b3 b2 */                         \
    "psraw $f8, $f8, $f16         \n\t" /* y6=a1-b1 y7=a0-b0 */             \
    "psubw $f14, $f0, $f4         \n\t" /* y4 y5 */                         \
    "paddw $f0, $f0, $f4          \n\t" /* y3 y2 */                         \
    "psraw $f0, $f0, $f16         \n\t" /* y3=a3+b3 y2=a2+b2 */             \
    "psraw $f14, $f14, $f16       \n\t" /* y4=a3-b3 y5=a2-b2 */             \
    "dli $10, 0xb1                \n\t" /* shuffle mask */                  \
    "packsswh $f6, $f6, $f0       \n\t" /* y3 y2 y1 y0 */                   \
    "dmtc1 $10, $f16              \n\t"                                     \
    "packsswh $f14, $f14, $f8     \n\t" /* y6 y7 y4 y5 */                   \
    "sdc1 $f6, "#A2"              \n\t" /* save y3 y2 y1 y0 */              \
    "pshufh $f14, $f14, $f16      \n\t" /* y7 y6 y5 y4 */                   \
    "sdc1 $f14, 8+"#A2"           \n\t" /* save y7 y6 y5 y4 */

#define DCT_8_INV_COL(A1, A2)                                               \
    "ldc1 $f2, 2*8(%3)            \n\t" /* tg_3_16 (stored as tg3 - 1) */   \
    "ldc1 $f6, 16*3+"#A1"         \n\t" /* x3 */                            \
    "ldc1 $f10, 16*5+"#A1"        \n\t" /* x5 */                            \
    "pmulhh $f0, $f2, $f6         \n\t" /* x3*(tg3 - 1) */                  \
    "ldc1 $f4, 0(%3)              \n\t" /* tg_1_16 */                       \
    "pmulhh $f2, $f2, $f10        \n\t" /* x5*(tg3 - 1) */                  \
    "ldc1 $f14, 16*7+"#A1"        \n\t" /* x7 */                            \
    "ldc1 $f12, 16*1+"#A1"        \n\t" /* x1 */                            \
    "pmulhh $f8, $f4, $f14        \n\t" /* x7*tg1 */                        \
    "paddsh $f0, $f0, $f6         \n\t" /* x3*tg3 */                        \
    "pmulhh $f4, $f4, $f12        \n\t" /* x1*tg1 */                        \
    "paddsh $f2, $f2, $f6         \n\t" /* x3 + x5*(tg3 - 1) */             \
    "psubsh $f0, $f0, $f10        \n\t" /* tm35 = x3*tg3 - x5 */            \
    "ldc1 $f6, 3*8(%3)            \n\t" /* cos_4_16 */                      \
    "paddsh $f2, $f2, $f10        \n\t" /* tp35 = x5*tg3 + x3 */            \
    "paddsh $f8, $f8, $f12        \n\t" /* tp17 = x7*tg1 + x1 */            \
    "psubsh $f4, $f4, $f14        \n\t" /* tm17 = x1*tg1 - x7 */            \
    "paddsh $f10, $f8, $f2        \n\t" /* b0 = tp17 + tp35 */              \
    "psubsh $f12, $f4, $f0        \n\t" /* b3 = tm17 - tm35 */              \
    "psubsh $f8, $f8, $f2         \n\t" /* t1 = tp17 - tp35 */              \
    "paddsh $f4, $f4, $f0         \n\t" /* t2 = tm17 + tm35 */              \
    "ldc1 $f14, 1*8(%3)           \n\t" /* tg_2_16 */                       \
    "sdc1 $f10, 3*16+"#A2"        \n\t" /* spill b0 */                      \
    "paddsh $f2, $f8, $f4         \n\t" /* t1 + t2 */                       \
    "sdc1 $f12, 5*16+"#A2"        \n\t" /* spill b3 */                      \
    "psubsh $f8, $f8, $f4         \n\t" /* t1 - t2 */                       \
    "ldc1 $f10, 2*16+"#A1"        \n\t" /* x2 */                            \
    "ldc1 $f12, 6*16+"#A1"        \n\t" /* x6 */                            \
    "pmulhh $f0, $f14, $f10       \n\t" /* x2*tg2 */                        \
    "pmulhh $f14, $f14, $f12      \n\t" /* x6*tg2 */                        \
    "pmulhh $f2, $f2, $f6         \n\t" /* b1 = (t1+t2)*cos4, halved */     \
    "ldc1 $f4, 0*16+"#A1"         \n\t" /* x0 */                            \
    "pmulhh $f8, $f8, $f6         \n\t" /* b2 = (t1-t2)*cos4, halved */     \
    "psubsh $f0, $f0, $f12        \n\t" /* tm26 = x2*tg2 - x6 */            \
    "ldc1 $f12, 4*16+"#A1"        \n\t" /* x4 */                            \
    "paddsh $f14, $f14, $f10      \n\t" /* tp26 = x6*tg2 + x2 */            \
    "psubsh $f6, $f4, $f12        \n\t" /* tm04 = x0 - x4 */                \
    "paddsh $f4, $f4, $f12        \n\t" /* tp04 = x0 + x4 */                \
    "paddsh $f10, $f4, $f14       \n\t" /* a0 = tp04 + tp26 */              \
    "psubsh $f12, $f6, $f0        \n\t" /* a2 = tm04 - tm26 */              \
    "psubsh $f4, $f4, $f14        \n\t" /* a3 = tp04 - tp26 */              \
    "paddsh $f6, $f6, $f0         \n\t" /* a1 = tm04 + tm26 */              \
    "paddsh $f2, $f2, $f2         \n\t" /* b1 */                            \
    "paddsh $f8, $f8, $f8         \n\t" /* b2 */                            \
    "psubsh $f14, $f6, $f2        \n\t" /* y6 = a1 - b1 */                  \
    "dli $10, 6                   \n\t" /* SHIFT_INV_COL */                 \
    "paddsh $f6, $f6, $f2         \n\t" /* y1 = a1 + b1 */                  \
    "dmtc1 $10, $f16              \n\t"                                     \
    "psubsh $f0, $f12, $f8        \n\t" /* y5 = a2 - b2 */                  \
    "paddsh $f12, $f12, $f8       \n\t" /* y2 = a2 + b2 */                  \
    "psrah $f6, $f6, $f16         \n\t" /* y1 >>= 6 */                      \
    "psrah $f12, $f12, $f16       \n\t" /* y2 >>= 6 */                      \
    "ldc1 $f2, 3*16+"#A2"         \n\t" /* reload b0 */                     \
    "psrah $f14, $f14, $f16       \n\t" /* y6 >>= 6 */                      \
    "psrah $f0, $f0, $f16         \n\t" /* y5 >>= 6 */                      \
    "sdc1 $f6, 1*16+"#A2"         \n\t" /* save y1 */                       \
    "psubsh $f8, $f10, $f2        \n\t" /* y7 = a0 - b0 */                  \
    "paddsh $f10, $f10, $f2       \n\t" /* y0 = a0 + b0 */                  \
    "sdc1 $f12, 2*16+"#A2"        \n\t" /* save y2 */                       \
    "ldc1 $f6, 5*16+"#A2"         \n\t" /* reload b3 */                     \
    "psrah $f10, $f10, $f16       \n\t" /* y0 >>= 6 */                      \
    "psrah $f8, $f8, $f16         \n\t" /* y7 >>= 6 */                      \
    "sdc1 $f0, 5*16+"#A2"         \n\t" /* save y5 */                       \
    "psubsh $f12, $f4, $f6        \n\t" /* y4 = a3 - b3 */                  \
    "paddsh $f4, $f4, $f6         \n\t" /* y3 = a3 + b3 */                  \
    "sdc1 $f14, 6*16+"#A2"        \n\t" /* save y6 */                       \
    "sdc1 $f10, 0*16+"#A2"        \n\t" /* save y0 */                       \
    "psrah $f4, $f4, $f16         \n\t" /* y3 >>= 6 */                      \
    "sdc1 $f8, 7*16+"#A2"         \n\t" /* save y7 */                       \
    "psrah $f12, $f12, $f16       \n\t" /* y4 >>= 6 */                      \
    "sdc1 $f4, 3*16+"#A2"         \n\t" /* save y3 */                       \
    "sdc1 $f12, 4*16+"#A2"        \n\t" /* save y4 */