/*
 * 6-tap quarter-pel low-pass filters.  Each output pixel is
 * (P[-2] + P[3] - 5*(P[-1] + P[2]) + C1*P[0] + C2*P[1] + round) >> SHIFT,
 * where (C1, C2, SHIFT) select the sub-pel phase.
 */
#define RV40_LOWPASS(OPNAME, OP) \
static void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                           const int h, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < h; i++)\
    {\
        OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst += dstStride;\
        src += srcStride;\
    }\
}\
\
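    /* Vertical pass: the same 6-tap kernel applied along columns, one\
     * column per loop iteration. */\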
static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                           const int w, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < w; i++)\
    {\
        const int srcB  = src[-2*srcStride];\
        const int srcA  = src[-1*srcStride];\
        const int src0  = src[0 *srcStride];\
        const int src1  = src[1 *srcStride];\
        const int src2  = src[2 *srcStride];\
        const int src3  = src[3 *srcStride];\
        const int src4  = src[4 *srcStride];\
        const int src5  = src[5 *srcStride];\
        const int src6  = src[6 *srcStride];\
        const int src7  = src[7 *srcStride];\
        const int src8  = src[8 *srcStride];\
        const int src9  = src[9 *srcStride];\
        const int src10 = src[10*srcStride];\
        OP(dst[0*dstStride], (srcB + src3  - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1*dstStride], (srcA + src4  - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2*dstStride], (src0 + src5  - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3*dstStride], (src1 + src6  - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4*dstStride], (src2 + src7  - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5*dstStride], (src3 + src8  - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6*dstStride], (src4 + src9  - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst++;\
        src++;\
    }\
}\
\
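    /* A 16-wide block is handled as four 8x8 quadrants: both halves of the\
     * top eight rows, then both halves of the remaining w-8 (or h-8) rows. */\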
static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                            const int w, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, w-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
}\
\
static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                            const int h, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, h-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
}
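/*
 * RV40_MC instantiates one motion-compensation function per quarter-pel
 * position mcXY (X = horizontal phase, Y = vertical phase).  Phase 1
 * filters with (C1, C2, SHIFT) = (52, 20, 6), phase 2 with (20, 20, 5)
 * and phase 3 with (20, 52, 6).  2D positions run the horizontal pass
 * into a temporary buffer (with two extra rows above, hence
 * src - 2*stride) and then the vertical pass over that buffer.
 */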
#define RV40_MC(OPNAME, SIZE) \
static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}
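/* cm[] points into ff_crop_tab, clipping the filter output to 0..255:
 * op_put stores the clipped value, op_avg rounds it into the existing
 * pixel. */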
#define op_avg(a, b)  a = (((a)+cm[b]+1)>>1)
#define op_put(a, b)  a = cm[b]

RV40_LOWPASS(put_, op_put)
RV40_LOWPASS(avg_, op_avg)

#undef op_avg
#undef op_put

RV40_MC(put_, 8)
RV40_MC(put_, 16)
RV40_MC(avg_, 8)
RV40_MC(avg_, 16)
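/*
 * PIXOP2 builds the diagonal half-pel average ((A + B + C + D + 2) >> 2)
 * four pixels at a time: the low two bits (l0/l1) and the high six bits
 * (h0/h1) of each byte are summed separately so that four bytes can be
 * processed in one 32-bit word without cross-byte carries.
 */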
#define PIXOP2(OPNAME, OP) \
static inline void OPNAME ## _pixels8_xy2_8_c(uint8_t *block, \
                                              const uint8_t *pixels, \
                                              ptrdiff_t line_size, \
                                              int h) \
{ \
    int i, j; \
 \
    for (j = 0; j < 2; j++) { \
        const uint32_t a = AV_RN32(pixels); \
        const uint32_t b = AV_RN32(pixels + 1); \
        uint32_t l0 = (a & 0x03030303UL) + \
                      (b & 0x03030303UL) + \
                           0x02020202UL; \
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + \
                      ((b & 0xFCFCFCFCUL) >> 2); \
        uint32_t l1, h1; \
 \
        pixels += line_size; \
        for (i = 0; i < h; i += 2) { \
            uint32_t a = AV_RN32(pixels); \
            uint32_t b = AV_RN32(pixels + 1); \
            l1 = (a & 0x03030303UL) + \
                 (b & 0x03030303UL); \
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + \
                 ((b & 0xFCFCFCFCUL) >> 2); \
            OP(*((uint32_t *) block), \
               h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \
            pixels += line_size; \
            block  += line_size; \
            a = AV_RN32(pixels); \
            b = AV_RN32(pixels + 1); \
            l0 = (a & 0x03030303UL) + \
                 (b & 0x03030303UL) + \
                      0x02020202UL; \
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + \
                 ((b & 0xFCFCFCFCUL) >> 2); \
            OP(*((uint32_t *) block), \
               h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); \
            pixels += line_size; \
            block  += line_size; \
        } \
        pixels += 4 - line_size * (h + 1); \
        block  += 4 - line_size * h; \
    } \
} \
 \
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_8_c, \
               OPNAME ## _pixels8_xy2_8_c, \
               8)
#define op_avg(a, b) a = rnd_avg32(a, b)
#define op_put(a, b) a = b

PIXOP2(avg, op_avg)
PIXOP2(put, op_put)

#undef op_avg
#undef op_put
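/* RV40 approximates the (3,3) quarter-pel position with this diagonal
 * half-pel average. */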
static void put_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_8_c(dst, src, stride, 16);
}

static void avg_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_8_c(dst, src, stride, 16);
}

static void put_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_8_c(dst, src, stride, 8);
}

static void avg_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_8_c(dst, src, stride, 8);
}
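/*
 * Chroma motion compensation: 2D bilinear interpolation from the 1/8-pel
 * fractional position (x, y), with a position-dependent rounding bias
 * looked up in rv40_bias[][] below.
 */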
static const int rv40_bias[4][4] = {
    {  0, 16, 32, 16 },
    { 32, 28, 32, 28 },
    {  0, 32, 16, 32 },
    { 32, 28, 32, 28 }
};

#define RV40_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst,\
                                        const uint8_t *src,\
                                        ptrdiff_t stride, int h, int x, int y)\
{\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        /* x or y is 0: the bilinear filter reduces to 2 taps along one axis */\
        const int E = B + C;\
        const ptrdiff_t step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
\
static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst,\
                                        const uint8_t *src,\
                                        ptrdiff_t stride, int h, int x, int y)\
{\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const ptrdiff_t step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
            OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
            OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
            OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}
#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
#define op_put(a, b) a = ((b)>>6)

RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg)
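/*
 * Weighted B-frame prediction: dst = (w2*src1 + w1*src2 + 16) >> 5.
 * The "rnd" variant drops nine LSBs of each product before summing
 * (one of the two rounding modes the bitstream can select); the "nornd"
 * variant sums at full precision.
 */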
#define RV40_WEIGHT_FUNC(size) \
static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = ((((unsigned)w2 * src1[i]) >> 9) + (((unsigned)w1 * src2[i]) >> 9) + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}\
\
static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = ((unsigned)w2 * src1[i] + (unsigned)w1 * src2[i] + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}

RV40_WEIGHT_FUNC(16)
RV40_WEIGHT_FUNC(8)

/**
 * dither values for deblocking filter - left/top values
 */
static const uint8_t rv40_dither_l[16] = {
    0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30,
    0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40
};

/**
 * dither values for deblocking filter - right/bottom values
 */
static const uint8_t rv40_dither_r[16] = {
    0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40,
    0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40
};
#define CLIP_SYMM(a, b) av_clip(a, -(b), b)
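/*
 * Deblocking helpers.  alpha and beta are quantizer-derived activity
 * thresholds; the lim_* arguments bound, via CLIP_SYMM, how far each
 * pixel may be corrected.
 */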
/**
 * weaker deblocking, very similar to the one described in 4.4.2 of JVT-A003r1
 */
static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
                                                   const int step,
                                                   const ptrdiff_t stride,
                                                   const int filter_p1,
                                                   const int filter_q1,
                                                   const int alpha,
                                                   const int beta,
                                                   const int lim_p0q0,
                                                   const int lim_q1,
                                                   const int lim_p1)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i, t, u, diff;

    for (i = 0; i < 4; i++, src += stride) {
        int diff_p1p0 = src[-2*step] - src[-1*step];
        int diff_q1q0 = src[ 1*step] - src[ 0*step];
        int diff_p1p2 = src[-2*step] - src[-3*step];
        int diff_q1q2 = src[ 1*step] - src[ 2*step];

        t = src[0*step] - src[-1*step];
        if (!t)
            continue;

        u = (alpha * FFABS(t)) >> 7;
        if (u > 3 - (filter_p1 && filter_q1))
            continue;

        t <<= 2;
        if (filter_p1 && filter_q1)
            t += src[-2*step] - src[1*step];

        diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
        src[-1*step] = cm[src[-1*step] + diff];
        src[ 0*step] = cm[src[ 0*step] - diff];

        if (filter_p1 && FFABS(diff_p1p2) <= beta) {
            t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
            src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
        }

        if (filter_q1 && FFABS(diff_q1q2) <= beta) {
            t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
            src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
        }
    }
}
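/* The horizontal and vertical variants differ only in whether the filter
 * steps across the edge by the line stride (horizontal edge) or by one
 * pixel (vertical edge). */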
static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, 1, stride, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
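/*
 * Strong filter: replaces up to three pixels on each side of the edge
 * with dithered low-pass values (rv40_dither_l/rv40_dither_r supply the
 * rounding); lims bounds the change when the edge is not perfectly flat,
 * and chroma planes skip the outermost pixel.
 */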
static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
                                                     const int step,
                                                     const ptrdiff_t stride,
                                                     const int alpha,
                                                     const int lims,
                                                     const int dmode,
                                                     const int chroma)
{
    int i;

    for (i = 0; i < 4; i++, src += stride) {
        int sflag, p0, q0, p1, q1;
        int t = src[0*step] - src[-1*step];

        if (!t)
            continue;

        sflag = (alpha * FFABS(t)) >> 7;
        if (sflag > 1)
            continue;

        p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] +
              26*src[ 0*step] + 25*src[ 1*step] +
              rv40_dither_l[dmode + i]) >> 7;
        q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] +
              26*src[ 1*step] + 25*src[ 2*step] +
              rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
            q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
        }

        p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 +
              25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
        q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] +
              25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
            q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
        }

        src[-2*step] = p1;
        src[-1*step] = p0;
        src[ 0*step] = q0;
        src[ 1*step] = q1;

        if (!chroma) {
            src[-3*step] = (25*src[-1*step] + 26*src[-2*step] +
                            51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
            src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] +
                            51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
        }
    }
}
static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
}

static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, 1, stride, alpha, lims, dmode, chroma);
}
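/*
 * Edge-strength analysis: p1/q1 report whether the second pixel on each
 * side of the edge tracks the first closely enough to be filtered, and
 * the return value (checked only on block edges) requests the strong
 * filter when both sides are sufficiently flat.
 */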
static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
                                                      int step, ptrdiff_t stride,
                                                      int beta, int beta2,
                                                      int edge,
                                                      int *p1, int *q1)
{
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    int strong0 = 0, strong1 = 0;
    uint8_t *ptr;
    int i;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p0 += ptr[-2*step] - ptr[-1*step];
        sum_q1q0 += ptr[ 1*step] - ptr[ 0*step];
    }

    *p1 = FFABS(sum_p1p0) < (beta << 2);
    *q1 = FFABS(sum_q1q0) < (beta << 2);

    if (!*p1 && !*q1)
        return 0;

    if (!edge)
        return 0;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p2 += ptr[-2*step] - ptr[-3*step];
        sum_q1q2 += ptr[ 1*step] - ptr[ 2*step];
    }

    strong0 = *p1 && (FFABS(sum_p1p2) < beta2);
    strong1 = *q1 && (FFABS(sum_q1q2) < beta2);

    return strong0 && strong1;
}
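/* Thin wrappers selecting the step/stride orientation, as above. */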
static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
}
static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1);
}

av_cold void ff_rv40dsp_init(RV34DSPContext *c)
{
    H264QpelContext qpel;

    ff_rv34dsp_init(c);
    ff_h264qpel_init(&qpel, 8);
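    /* Motion-compensation tables: [0] is 16x16, [1] is 8x8 blocks; the
     * second index is x + 4*y for quarter-pel offset (x, y).  Entries 0,
     * 2 and 8 (integer and pure half-pel positions) reuse the shared
     * H.264 qpel helpers; entry 15 uses the diagonal half-pel average. */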
    c->put_pixels_tab[0][ 0] = qpel.put_h264_qpel_pixels_tab[0][ 0];
    c->put_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c;
    c->put_pixels_tab[0][ 2] = qpel.put_h264_qpel_pixels_tab[0][ 2];
    c->put_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c;
    c->put_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c;
    c->put_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c;
    c->put_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c;
    c->put_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c;
    c->put_pixels_tab[0][ 8] = qpel.put_h264_qpel_pixels_tab[0][ 8];
    c->put_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c;
    c->put_pixels_tab[0][10] = put_rv40_qpel16_mc22_c;
    c->put_pixels_tab[0][11] = put_rv40_qpel16_mc32_c;
    c->put_pixels_tab[0][12] = put_rv40_qpel16_mc03_c;
    c->put_pixels_tab[0][13] = put_rv40_qpel16_mc13_c;
    c->put_pixels_tab[0][14] = put_rv40_qpel16_mc23_c;
    c->put_pixels_tab[0][15] = put_rv40_qpel16_mc33_c;
    c->avg_pixels_tab[0][ 0] = qpel.avg_h264_qpel_pixels_tab[0][ 0];
    c->avg_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c;
    c->avg_pixels_tab[0][ 2] = qpel.avg_h264_qpel_pixels_tab[0][ 2];
    c->avg_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c;
    c->avg_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c;
    c->avg_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c;
    c->avg_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c;
    c->avg_pixels_tab[0][ 7] = avg_rv40_qpel16_mc31_c;
    c->avg_pixels_tab[0][ 8] = qpel.avg_h264_qpel_pixels_tab[0][ 8];
    c->avg_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c;
    c->avg_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c;
    c->avg_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c;
    c->avg_pixels_tab[0][12] = avg_rv40_qpel16_mc03_c;
    c->avg_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c;
    c->avg_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c;
    c->avg_pixels_tab[0][15] = avg_rv40_qpel16_mc33_c;
    c->put_pixels_tab[1][ 0] = qpel.put_h264_qpel_pixels_tab[1][ 0];
    c->put_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c;
    c->put_pixels_tab[1][ 2] = qpel.put_h264_qpel_pixels_tab[1][ 2];
    c->put_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c;
    c->put_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c;
    c->put_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c;
    c->put_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c;
    c->put_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c;
    c->put_pixels_tab[1][ 8] = qpel.put_h264_qpel_pixels_tab[1][ 8];
    c->put_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c;
    c->put_pixels_tab[1][10] = put_rv40_qpel8_mc22_c;
    c->put_pixels_tab[1][11] = put_rv40_qpel8_mc32_c;
    c->put_pixels_tab[1][12] = put_rv40_qpel8_mc03_c;
    c->put_pixels_tab[1][13] = put_rv40_qpel8_mc13_c;
    c->put_pixels_tab[1][14] = put_rv40_qpel8_mc23_c;
    c->put_pixels_tab[1][15] = put_rv40_qpel8_mc33_c;
    c->avg_pixels_tab[1][ 0] = qpel.avg_h264_qpel_pixels_tab[1][ 0];
    c->avg_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c;
    c->avg_pixels_tab[1][ 2] = qpel.avg_h264_qpel_pixels_tab[1][ 2];
    c->avg_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c;
    c->avg_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c;
    c->avg_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c;
    c->avg_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c;
    c->avg_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c;
    c->avg_pixels_tab[1][ 8] = qpel.avg_h264_qpel_pixels_tab[1][ 8];
    c->avg_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c;
    c->avg_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c;
    c->avg_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c;
    c->avg_pixels_tab[1][12] = avg_rv40_qpel8_mc03_c;
    c->avg_pixels_tab[1][13] = avg_rv40_qpel8_mc13_c;
    c->avg_pixels_tab[1][14] = avg_rv40_qpel8_mc23_c;
    c->avg_pixels_tab[1][15] = avg_rv40_qpel8_mc33_c;
    c->put_chroma_pixels_tab[0] = put_rv40_chroma_mc8_c;
    c->put_chroma_pixels_tab[1] = put_rv40_chroma_mc4_c;
    c->avg_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c;
    c->avg_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c;
    c->rv40_weight_pixels_tab[0][0] = rv40_weight_func_rnd_16;
    c->rv40_weight_pixels_tab[0][1] = rv40_weight_func_rnd_8;
    c->rv40_weight_pixels_tab[1][0] = rv40_weight_func_nornd_16;
    c->rv40_weight_pixels_tab[1][1] = rv40_weight_func_nornd_8;

    c->rv40_loop_filter_strength[0] = rv40_h_loop_filter_strength;
    c->rv40_loop_filter_strength[1] = rv40_v_loop_filter_strength;
    c->rv40_weak_loop_filter[0]     = rv40_h_weak_loop_filter;
    c->rv40_weak_loop_filter[1]     = rv40_v_weak_loop_filter;
    c->rv40_strong_loop_filter[0]   = rv40_h_strong_loop_filter;
    c->rv40_strong_loop_filter[1]   = rv40_v_strong_loop_filter;
}