[Ffmpeg-cvslog] CVS: ffmpeg/libavcodec/ps2 dsputil_mmi.c, 1.11, 1.12 idct_mmi.c, 1.6, 1.7 mmi.h, 1.2, 1.3 mpegvideo_mmi.c, 1.7, 1.8

Diego Biurrun CVS diego
Thu Dec 22 02:10:13 CET 2005


Update of /cvsroot/ffmpeg/ffmpeg/libavcodec/ps2
In directory mail:/var2/tmp/cvs-serv29491/libavcodec/ps2

Modified Files:
	dsputil_mmi.c idct_mmi.c mmi.h mpegvideo_mmi.c 
Log Message:
COSMETICS: tabs --> spaces, some prettyprinting


Index: dsputil_mmi.c
===================================================================
RCS file: /cvsroot/ffmpeg/ffmpeg/libavcodec/ps2/dsputil_mmi.c,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -d -r1.11 -r1.12
--- dsputil_mmi.c	17 Dec 2005 18:14:35 -0000	1.11
+++ dsputil_mmi.c	22 Dec 2005 01:10:10 -0000	1.12
@@ -80,7 +80,7 @@
         "pextlb $10, $0, $10    \n\t"
         "sq     $10, 80(%1)     \n\t"
         "pextlb $8, $0, $8      \n\t"
-	"sq     $8, 96(%1)      \n\t"
+        "sq     $8, 96(%1)      \n\t"
         "pextlb $9, $0, $9      \n\t"
         "sq     $9, 112(%1)     \n\t"
         ".set   pop             \n\t"
@@ -112,7 +112,7 @@
         asm volatile (
         ".set   push            \n\t"
         ".set   mips3           \n\t"
-	"1:                     \n\t"
+        "1:                     \n\t"
         "ldr    $8, 0(%1)       \n\t"
         "add    $11, %1, %3     \n\t"
         "ldl    $8, 7(%1)       \n\t"
@@ -133,7 +133,7 @@
         "bgtz   %2, 1b          \n\t"
         ".set   pop             \n\t"
         : "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size)
-	: "$8", "$9", "$10", "$11", "$12", "$13", "memory" );
+        : "$8", "$9", "$10", "$11", "$12", "$13", "memory" );
 }
 
 

Index: idct_mmi.c
===================================================================
RCS file: /cvsroot/ffmpeg/ffmpeg/libavcodec/ps2/idct_mmi.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -d -r1.6 -r1.7
--- idct_mmi.c	17 Dec 2005 18:14:35 -0000	1.6
+++ idct_mmi.c	22 Dec 2005 01:10:10 -0000	1.7
@@ -15,32 +15,32 @@
 #include "../dsputil.h"
 #include "mmi.h"
 
-#define BITS_INV_ACC	5	// 4 or 5 for IEEE
-#define SHIFT_INV_ROW	(16 - BITS_INV_ACC)
+#define BITS_INV_ACC    5       // 4 or 5 for IEEE
+#define SHIFT_INV_ROW   (16 - BITS_INV_ACC)
 #define SHIFT_INV_COL   (1 + BITS_INV_ACC)
 
-#define TG1	6518
-#define TG2	13573
-#define TG3	21895
-#define CS4	23170
+#define TG1             6518
+#define TG2             13573
+#define TG3             21895
+#define CS4             23170
 
-#define ROUNDER_0	0
-#define ROUNDER_1	16
+#define ROUNDER_0       0
+#define ROUNDER_1       16
 
-#define TAB_i_04	(32+0)
-#define TAB_i_17	(32+64)
-#define TAB_i_26	(32+128)
-#define TAB_i_35	(32+192)
+#define TAB_i_04        (32+0)
+#define TAB_i_17        (32+64)
+#define TAB_i_26        (32+128)
+#define TAB_i_35        (32+192)
 
-#define TG_1_16		(32+256+0)
-#define TG_2_16		(32+256+16)
-#define TG_3_16		(32+256+32)
-#define COS_4_16	(32+256+48)
+#define TG_1_16         (32+256+0)
+#define TG_2_16         (32+256+16)
+#define TG_3_16         (32+256+32)
+#define COS_4_16        (32+256+48)
 
-#define CLIPMAX		(32+256+64+0)
+#define CLIPMAX         (32+256+64+0)
 
 static short consttable[] align16 = {
-/* rounder 0*/	// assume SHIFT_INV_ROW == 11
+/* rounder 0*/  // assume SHIFT_INV_ROW == 11
  0x3ff, 1, 0x3ff, 1, 0x3ff, 1, 0x3ff, 1,
 /* rounder 1*/
  0x3ff, 0, 0x3ff, 0, 0x3ff, 0, 0x3ff, 0,
@@ -75,274 +75,274 @@
 
 
 #define DCT_8_INV_ROW1(blk, rowoff, taboff, rnd, outreg) { \
-	lq(blk, rowoff, $16);	/* r16 = x7  x5  x3  x1  x6  x4  x2  x0 */ \
-	/*slot*/ \
-	lq($24, 0+taboff, $17);	/* r17 = w */ \
-	/*delay slot $16*/ \
-	lq($24, 16+taboff, $18);/* r18 = w */ \
-	prevh($16, $2);		/* r2  = x1  x3  x5  x7  x0  x2  x4  x6 */ \
-	lq($24, 32+taboff, $19);/* r19 = w */ \
-	phmadh($17, $16, $17);	/* r17 = b1"b0'a1"a0' */ \
-	lq($24, 48+taboff, $20);/* r20 = w */ \
-	phmadh($18, $2, $18);	/* r18 = b1'b0"a1'a0" */ \
-	phmadh($19, $16, $19);	/* r19 = b3"b2'a3"a2' */ \
-	phmadh($20, $2, $20);	/* r20 = b3'b2"a3'a2" */ \
-	paddw($17, $18, $17);	/* r17 = (b1)(b0)(a1)(a0) */ \
-	paddw($19, $20, $19);	/* r19 = (b3)(b2)(a3)(a2) */ \
-	pcpyld($19, $17, $18);	/* r18 = (a3)(a2)(a1)(a0) */ \
-	pcpyud($17, $19, $20);	/* r20 = (b3)(b2)(b1)(b0) */ \
-	paddw($18, rnd, $18);	/* r18 = (a3)(a2)(a1)(a0) */\
-	paddw($18, $20, $17);	/* r17 = ()()()(a0+b0) */ \
-	psubw($18, $20, $20);	/* r20 = ()()()(a0-b0) */ \
-	psraw($17, SHIFT_INV_ROW, $17); /* r17 = (y3 y2 y1 y0) */ \
-	psraw($20, SHIFT_INV_ROW, $20);	/* r20 = (y4 y5 y6 y7) */ \
-	ppach($20, $17, outreg);/* out = y4 y5 y6 y7 y3 y2 y1 y0  Note order */ \
+        lq(blk, rowoff, $16);   /* r16 = x7  x5  x3  x1  x6  x4  x2  x0 */ \
+        /*slot*/ \
+        lq($24, 0+taboff, $17); /* r17 = w */ \
+        /*delay slot $16*/ \
+        lq($24, 16+taboff, $18);/* r18 = w */ \
+        prevh($16, $2);         /* r2  = x1  x3  x5  x7  x0  x2  x4  x6 */ \
+        lq($24, 32+taboff, $19);/* r19 = w */ \
+        phmadh($17, $16, $17);  /* r17 = b1"b0'a1"a0' */ \
+        lq($24, 48+taboff, $20);/* r20 = w */ \
+        phmadh($18, $2, $18);   /* r18 = b1'b0"a1'a0" */ \
+        phmadh($19, $16, $19);  /* r19 = b3"b2'a3"a2' */ \
+        phmadh($20, $2, $20);   /* r20 = b3'b2"a3'a2" */ \
+        paddw($17, $18, $17);   /* r17 = (b1)(b0)(a1)(a0) */ \
+        paddw($19, $20, $19);   /* r19 = (b3)(b2)(a3)(a2) */ \
+        pcpyld($19, $17, $18);  /* r18 = (a3)(a2)(a1)(a0) */ \
+        pcpyud($17, $19, $20);  /* r20 = (b3)(b2)(b1)(b0) */ \
+        paddw($18, rnd, $18);   /* r18 = (a3)(a2)(a1)(a0) */\
+        paddw($18, $20, $17);   /* r17 = ()()()(a0+b0) */ \
+        psubw($18, $20, $20);   /* r20 = ()()()(a0-b0) */ \
+        psraw($17, SHIFT_INV_ROW, $17); /* r17 = (y3 y2 y1 y0) */ \
+        psraw($20, SHIFT_INV_ROW, $20); /* r20 = (y4 y5 y6 y7) */ \
+        ppach($20, $17, outreg);/* out = y4 y5 y6 y7 y3 y2 y1 y0  Note order */ \
 \
-	prevh(outreg, $2);	\
-	pcpyud($2, $2, $2);	\
-	pcpyld($2, outreg, outreg);	\
+        prevh(outreg, $2);        \
+        pcpyud($2, $2, $2);        \
+        pcpyld($2, outreg, outreg);        \
 }
 
 
 #define DCT_8_INV_COL8() \
 \
-	lq($24, TG_3_16, $2);	/* r2  = tn3 */	\
+        lq($24, TG_3_16, $2);   /* r2  = tn3 */         \
 \
-	pmulth($11, $2, $17);	/* r17 = x3 * tn3 (6420) */ \
-	psraw($17, 15, $17);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $17, $17);	/* r17 = x3 * tn3 */ \
-	psubh($17, $13, $17);	/* r17 = tm35 */	\
+        pmulth($11, $2, $17);   /* r17 = x3 * tn3 (6420) */ \
+        psraw($17, 15, $17);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $17, $17);   /* r17 = x3 * tn3 */    \
+        psubh($17, $13, $17);   /* r17 = tm35 */        \
 \
-	pmulth($13, $2, $18);	/* r18 = x5 * tn3 (6420) */ \
-	psraw($18, 15, $18);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $18, $18);	/* r18 = x5 * tn3 */ \
-	paddh($18, $11, $18);	/* r18 = tp35 */	\
+        pmulth($13, $2, $18);   /* r18 = x5 * tn3 (6420) */ \
+        psraw($18, 15, $18);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $18, $18);   /* r18 = x5 * tn3 */    \
+        paddh($18, $11, $18);   /* r18 = tp35 */        \
 \
-	lq($24, TG_1_16, $2);	/* r2  = tn1 */	\
+        lq($24, TG_1_16, $2);   /* r2  = tn1 */         \
 \
-	pmulth($15, $2, $19);	/* r19 = x7 * tn1 (6420) */ \
-	psraw($19, 15, $19);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $19, $19);	/* r19 = x7 * tn1 */ \
-	paddh($19, $9, $19);	/* r19 = tp17 */	\
+        pmulth($15, $2, $19);   /* r19 = x7 * tn1 (6420) */ \
+        psraw($19, 15, $19);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $19, $19);   /* r19 = x7 * tn1 */    \
+        paddh($19, $9, $19);    /* r19 = tp17 */        \
 \
-	pmulth($9, $2, $20);	/* r20 = x1 * tn1 (6420) */ \
-	psraw($20, 15, $20);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $20, $20);	/* r20 = x1 * tn1 */ \
-	psubh($20, $15, $20);	/* r20 = tm17 */	\
+        pmulth($9, $2, $20);    /* r20 = x1 * tn1 (6420) */ \
+        psraw($20, 15, $20);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $20, $20);   /* r20 = x1 * tn1 */    \
+        psubh($20, $15, $20);   /* r20 = tm17 */        \
 \
-	psubh($19, $18, $3);	/* r3  = t1 */	\
-	paddh($20, $17, $16);	/* r16 = t2 */	\
-	psubh($20, $17, $23);	/* r23 = b3 */	\
-	paddh($19, $18, $20);	/* r20 = b0 */	\
+        psubh($19, $18, $3);    /* r3  = t1 */          \
+        paddh($20, $17, $16);   /* r16 = t2 */          \
+        psubh($20, $17, $23);   /* r23 = b3 */          \
+        paddh($19, $18, $20);   /* r20 = b0 */          \
 \
-	lq($24, COS_4_16, $2);	/* r2  = cs4 */	\
+        lq($24, COS_4_16, $2);  /* r2  = cs4 */         \
 \
-	paddh($3, $16, $21);	/* r21 = t1+t2 */	\
-	psubh($3, $16, $22);	/* r22 = t1-t2 */	\
+        paddh($3, $16, $21);    /* r21 = t1+t2 */       \
+        psubh($3, $16, $22);    /* r22 = t1-t2 */       \
 \
-	pmulth($21, $2, $21);	/* r21 = cs4 * (t1+t2) 6420 */ \
-	psraw($21, 15, $21);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $21, $21);	/* r21 = b1 */	\
+        pmulth($21, $2, $21);   /* r21 = cs4 * (t1+t2) 6420 */ \
+        psraw($21, 15, $21);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $21, $21);   /* r21 = b1 */          \
 \
-	pmulth($22, $2, $22);	/* r22 = cs4 * (t1-t2) 6420 */ \
-	psraw($22, 15, $22);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $22, $22);	/* r22 = b2 */	\
+        pmulth($22, $2, $22);   /* r22 = cs4 * (t1-t2) 6420 */ \
+        psraw($22, 15, $22);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $22, $22);   /* r22 = b2 */          \
 \
-	lq($24, TG_2_16, $2);	/* r2  = tn2 */	\
+        lq($24, TG_2_16, $2);   /* r2  = tn2 */         \
 \
-	pmulth($10, $2, $17);	/* r17 = x2 * tn2 (6420) */ \
-	psraw($17, 15, $17);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $17, $17);	/* r17 = x3 * tn3 */ \
-	psubh($17, $14, $17);	/* r17 = tm26 */	\
+        pmulth($10, $2, $17);   /* r17 = x2 * tn2 (6420) */ \
+        psraw($17, 15, $17);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $17, $17);   /* r17 = x3 * tn3 */    \
+        psubh($17, $14, $17);   /* r17 = tm26 */        \
 \
-	pmulth($14, $2, $18);	/* r18 = x6 * tn2 (6420) */ \
-	psraw($18, 15, $18);	\
-	pmfhl_uw($3);		/* r3  = 7531 */	\
-	psraw($3, 15, $3);	\
-	pinteh($3, $18, $18);	/* r18 = x6 * tn2 */ \
-	paddh($18, $10, $18);	/* r18 = tp26 */	\
+        pmulth($14, $2, $18);   /* r18 = x6 * tn2 (6420) */ \
+        psraw($18, 15, $18);    \
+        pmfhl_uw($3);           /* r3  = 7531 */        \
+        psraw($3, 15, $3);      \
+        pinteh($3, $18, $18);   /* r18 = x6 * tn2 */    \
+        paddh($18, $10, $18);   /* r18 = tp26 */        \
 \
-	paddh($8, $12, $2);	/* r2  = tp04 */	\
-	psubh($8, $12, $3);	/* r3  = tm04 */	\
+        paddh($8, $12, $2);     /* r2  = tp04 */        \
+        psubh($8, $12, $3);     /* r3  = tm04 */        \
 \
-	paddh($2, $18, $16);	/* r16 = a0 */			\
-	psubh($2, $18, $19);	/* r19 = a3 */			\
-	psubh($3, $17, $18);	/* r18 = a2 */			\
-	paddh($3, $17, $17);	/* r17 = a1 */
+        paddh($2, $18, $16);    /* r16 = a0 */          \
+        psubh($2, $18, $19);    /* r19 = a3 */          \
+        psubh($3, $17, $18);    /* r18 = a2 */          \
+        paddh($3, $17, $17);    /* r17 = a1 */
 
 
 #define DCT_8_INV_COL8_STORE(blk) \
 \
-	paddh($16, $20, $2);	/* y0  a0+b0 */		\
-	psubh($16, $20, $16);	/* y7  a0-b0 */		\
-	psrah($2, SHIFT_INV_COL, $2);		\
-	psrah($16, SHIFT_INV_COL, $16);		\
-	sq($2, 0, blk); 			\
-	sq($16, 112, blk); 			\
+        paddh($16, $20, $2);    /* y0  a0+b0 */ \
+        psubh($16, $20, $16);   /* y7  a0-b0 */ \
+        psrah($2, SHIFT_INV_COL, $2);           \
+        psrah($16, SHIFT_INV_COL, $16);         \
+        sq($2, 0, blk);                         \
+        sq($16, 112, blk);                      \
 \
-	paddh($17, $21, $3);	/* y1  a1+b1 */		\
-	psubh($17, $21, $17);	/* y6  a1-b1 */		\
-	psrah($3, SHIFT_INV_COL, $3);		\
-	psrah($17, SHIFT_INV_COL, $17);		\
-	sq($3, 16, blk);			\
-	sq($17, 96, blk);			\
+        paddh($17, $21, $3);    /* y1  a1+b1 */ \
+        psubh($17, $21, $17);   /* y6  a1-b1 */ \
+        psrah($3, SHIFT_INV_COL, $3);           \
+        psrah($17, SHIFT_INV_COL, $17);         \
+        sq($3, 16, blk);                        \
+        sq($17, 96, blk);                       \
 \
-	paddh($18, $22, $2);	/* y2  a2+b2 */	\
-	psubh($18, $22, $18);	/* y5  a2-b2 */	\
-	psrah($2, SHIFT_INV_COL, $2);	\
-	psrah($18, SHIFT_INV_COL, $18);	\
-	sq($2, 32, blk);			\
-	sq($18, 80, blk);			\
+        paddh($18, $22, $2);    /* y2  a2+b2 */ \
+        psubh($18, $22, $18);   /* y5  a2-b2 */ \
+        psrah($2, SHIFT_INV_COL, $2);           \
+        psrah($18, SHIFT_INV_COL, $18);         \
+        sq($2, 32, blk);                        \
+        sq($18, 80, blk);                       \
 \
-	paddh($19, $23, $3);	/* y3  a3+b3 */	\
-	psubh($19, $23, $19);	/* y4  a3-b3 */	\
-	psrah($3, SHIFT_INV_COL, $3);	\
-	psrah($19, SHIFT_INV_COL, $19);	\
-	sq($3, 48, blk);			\
-	sq($19, 64, blk);
+        paddh($19, $23, $3);    /* y3  a3+b3 */ \
+        psubh($19, $23, $19);   /* y4  a3-b3 */ \
+        psrah($3, SHIFT_INV_COL, $3);           \
+        psrah($19, SHIFT_INV_COL, $19);         \
+        sq($3, 48, blk);                        \
+        sq($19, 64, blk);
 
 
 
 #define DCT_8_INV_COL8_PMS() \
-	paddh($16, $20, $2);	/* y0  a0+b0 */		\
-	psubh($16, $20, $20);	/* y7  a0-b0 */		\
-	psrah($2, SHIFT_INV_COL, $16);		\
-	psrah($20, SHIFT_INV_COL, $20);		\
+        paddh($16, $20, $2);    /* y0  a0+b0 */ \
+        psubh($16, $20, $20);   /* y7  a0-b0 */ \
+        psrah($2, SHIFT_INV_COL, $16);          \
+        psrah($20, SHIFT_INV_COL, $20);         \
 \
-	paddh($17, $21, $3);	/* y1  a1+b1 */		\
-	psubh($17, $21, $21);	/* y6  a1-b1 */		\
-	psrah($3, SHIFT_INV_COL, $17);		\
-	psrah($21, SHIFT_INV_COL, $21);		\
+        paddh($17, $21, $3);    /* y1  a1+b1 */ \
+        psubh($17, $21, $21);   /* y6  a1-b1 */ \
+        psrah($3, SHIFT_INV_COL, $17);          \
+        psrah($21, SHIFT_INV_COL, $21);         \
 \
-	paddh($18, $22, $2);	/* y2  a2+b2 */	\
-	psubh($18, $22, $22);	/* y5  a2-b2 */	\
-	psrah($2, SHIFT_INV_COL, $18);	\
-	psrah($22, SHIFT_INV_COL, $22);	\
+        paddh($18, $22, $2);    /* y2  a2+b2 */ \
+        psubh($18, $22, $22);   /* y5  a2-b2 */ \
+        psrah($2, SHIFT_INV_COL, $18);          \
+        psrah($22, SHIFT_INV_COL, $22);         \
 \
-	paddh($19, $23, $3);	/* y3  a3+b3 */	\
-	psubh($19, $23, $23);	/* y4  a3-b3 */	\
-	psrah($3, SHIFT_INV_COL, $19);	\
-	psrah($23, SHIFT_INV_COL, $23);
+        paddh($19, $23, $3);    /* y3  a3+b3 */ \
+        psubh($19, $23, $23);   /* y4  a3-b3 */ \
+        psrah($3, SHIFT_INV_COL, $19);          \
+        psrah($23, SHIFT_INV_COL, $23);
 
-#define PUT(rs) \
-	pminh(rs, $11, $2);	\
-    	pmaxh($2, $0, $2);	\
-	ppacb($0, $2, $2); \
-	sd3(2, 0, 4); \
-	__asm__ __volatile__ ("add $4, $5, $4");
+#define PUT(rs)                 \
+        pminh(rs, $11, $2);     \
+        pmaxh($2, $0, $2);      \
+        ppacb($0, $2, $2);      \
+        sd3(2, 0, 4);           \
+        __asm__ __volatile__ ("add $4, $5, $4");
 
 #define DCT_8_INV_COL8_PUT() \
-    	PUT($16);		\
-    	PUT($17);		\
-    	PUT($18);		\
-    	PUT($19);		\
-    	PUT($23);		\
-    	PUT($22);		\
-    	PUT($21);		\
-    	PUT($20);
+        PUT($16);        \
+        PUT($17);        \
+        PUT($18);        \
+        PUT($19);        \
+        PUT($23);        \
+        PUT($22);        \
+        PUT($21);        \
+        PUT($20);
 
-#define ADD(rs) \
-    ld3(4, 0, 2); \
-    pextlb($0, $2, $2); \
-    paddh($2, rs, $2); \
-    pminh($2, $11, $2);	\
-    pmaxh($2, $0, $2);	\
-    ppacb($0, $2, $2); \
-    sd3(2, 0, 4); \
-    __asm__ __volatile__ ("add $4, $5, $4");
+#define ADD(rs)          \
+        ld3(4, 0, 2);        \
+        pextlb($0, $2, $2);  \
+        paddh($2, rs, $2);   \
+        pminh($2, $11, $2);  \
+        pmaxh($2, $0, $2);   \
+        ppacb($0, $2, $2);   \
+        sd3(2, 0, 4); \
+        __asm__ __volatile__ ("add $4, $5, $4");
 
 /*fixme: schedule*/
 #define DCT_8_INV_COL8_ADD() \
-    	ADD($16);		\
-    	ADD($17);		\
-    	ADD($18);		\
-    	ADD($19);		\
-    	ADD($23);		\
-    	ADD($22);		\
-    	ADD($21);		\
-    	ADD($20);
+        ADD($16);        \
+        ADD($17);        \
+        ADD($18);        \
+        ADD($19);        \
+        ADD($23);        \
+        ADD($22);        \
+        ADD($21);        \
+        ADD($20);
 
 
 void ff_mmi_idct(int16_t * block)
 {
-    /* $4 = block */
-    __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
-    lq($24, ROUNDER_0, $8);
-    lq($24, ROUNDER_1, $7);
-    DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8);
-    DCT_8_INV_ROW1($4, 16, TAB_i_17, $7, $9);
-    DCT_8_INV_ROW1($4, 32, TAB_i_26, $7, $10);
-    DCT_8_INV_ROW1($4, 48, TAB_i_35, $7, $11);
-    DCT_8_INV_ROW1($4, 64, TAB_i_04, $7, $12);
-    DCT_8_INV_ROW1($4, 80, TAB_i_35, $7, $13);
-    DCT_8_INV_ROW1($4, 96, TAB_i_26, $7, $14);
-    DCT_8_INV_ROW1($4, 112, TAB_i_17, $7, $15);
-    DCT_8_INV_COL8();
-    DCT_8_INV_COL8_STORE($4);
+        /* $4 = block */
+        __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+        lq($24, ROUNDER_0, $8);
+        lq($24, ROUNDER_1, $7);
+        DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8);
+        DCT_8_INV_ROW1($4, 16, TAB_i_17, $7, $9);
+        DCT_8_INV_ROW1($4, 32, TAB_i_26, $7, $10);
+        DCT_8_INV_ROW1($4, 48, TAB_i_35, $7, $11);
+        DCT_8_INV_ROW1($4, 64, TAB_i_04, $7, $12);
+        DCT_8_INV_ROW1($4, 80, TAB_i_35, $7, $13);
+        DCT_8_INV_ROW1($4, 96, TAB_i_26, $7, $14);
+        DCT_8_INV_ROW1($4, 112, TAB_i_17, $7, $15);
+        DCT_8_INV_COL8();
+        DCT_8_INV_COL8_STORE($4);
 
-    //let savedtemp regs be saved
-    __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+        //let savedtemp regs be saved
+        __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
 }
 
 
 void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
 {
-    /* $4 = dest, $5 = line_size, $6 = block */
-    __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
-    lq($24, ROUNDER_0, $8);
-    lq($24, ROUNDER_1, $7);
-    DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
-    DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
-    DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
-    DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
-    DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
-    DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
-    DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
-    DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
-    DCT_8_INV_COL8();
-    lq($24, CLIPMAX, $11);
-    DCT_8_INV_COL8_PMS();
-    DCT_8_INV_COL8_PUT();
+        /* $4 = dest, $5 = line_size, $6 = block */
+        __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+        lq($24, ROUNDER_0, $8);
+        lq($24, ROUNDER_1, $7);
+        DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
+        DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
+        DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
+        DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
+        DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
+        DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
+        DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
+        DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
+        DCT_8_INV_COL8();
+        lq($24, CLIPMAX, $11);
+        DCT_8_INV_COL8_PMS();
+        DCT_8_INV_COL8_PUT();
 
-    //let savedtemp regs be saved
-    __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+        //let savedtemp regs be saved
+        __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
 }
 
 
 void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
 {
-    /* $4 = dest, $5 = line_size, $6 = block */
-    __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
-    lq($24, ROUNDER_0, $8);
-    lq($24, ROUNDER_1, $7);
-    DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
-    DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
-    DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
-    DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
-    DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
-    DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
-    DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
-    DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
-    DCT_8_INV_COL8();
-    lq($24, CLIPMAX, $11);
-    DCT_8_INV_COL8_PMS();
-    DCT_8_INV_COL8_ADD();
+        /* $4 = dest, $5 = line_size, $6 = block */
+        __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+        lq($24, ROUNDER_0, $8);
+        lq($24, ROUNDER_1, $7);
+        DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
+        DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
+        DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
+        DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
+        DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
+        DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
+        DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
+        DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
+        DCT_8_INV_COL8();
+        lq($24, CLIPMAX, $11);
+        DCT_8_INV_COL8_PMS();
+        DCT_8_INV_COL8_ADD();
 
-    //let savedtemp regs be saved
-    __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+        //let savedtemp regs be saved
+        __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
 }
 

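[Editor's note] A side note on the constants reformatted above: TG1/TG2/TG3/CS4 look like Q15 fixed-point values of tan(i*pi/16) and cos(pi/4), the usual constants for this style of row/column IDCT. Assuming that reading (an assumption, not stated in the commit), a quick numerical check in plain C reproduces the four #define values:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* Expected: 6518, 13573, 21895, 23170 (see the #defines above). */
        for (int i = 1; i <= 3; i++)
            printf("TG%d = %.0f\n", i, tan(i * M_PI / 16.0) * 32768.0);
        printf("CS4 = %.0f\n", cos(M_PI / 4.0) * 32768.0);
        return 0;
    }
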
Index: mmi.h
===================================================================
RCS file: /cvsroot/ffmpeg/ffmpeg/libavcodec/ps2/mmi.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -d -r1.2 -r1.3
--- mmi.h	3 Oct 2002 20:57:19 -0000	1.2
+++ mmi.h	22 Dec 2005 01:10:10 -0000	1.3
@@ -5,148 +5,148 @@
 
 /*
 #define r0 $zero
-#define r1 $at	//assembler!
-#define r2 $v0	//return
-#define r3 $v1	//return
-#define r4 $a0	//arg
-#define r5 $a1	//arg
-#define r6 $a2	//arg
-#define r7 $a3	//arg
-#define r8 $t0	//temp
-#define r9 $t1	//temp
-#define r10 $t2	//temp
-#define r11 $t3	//temp
-#define r12 $t4	//temp
-#define r13 $t5	//temp
-#define r14 $t6	//temp
-#define r15 $t7	//temp
-#define r16 $s0	//saved temp
-#define r17 $s1	//saved temp
-#define r18 $s2	//saved temp
-#define r19 $s3	//saved temp
-#define r20 $s4	//saved temp
-#define r21 $s5	//saved temp
-#define r22 $s6	//saved temp
-#define r23 $s7	//saved temp
-#define r24 $t8	//temp
-#define r25 $t9	//temp
-#define r26 $k0	//kernel
-#define r27 $k1	//kernel
-#define r28 $gp	//global ptr
-#define r29 $sp //stack ptr
-#define r30 $fp //frame ptr
-#define r31 $ra //return addr
+#define r1 $at          //assembler!
+#define r2 $v0          //return
+#define r3 $v1          //return
+#define r4 $a0          //arg
+#define r5 $a1          //arg
+#define r6 $a2          //arg
+#define r7 $a3          //arg
+#define r8 $t0          //temp
+#define r9 $t1          //temp
+#define r10 $t2         //temp
+#define r11 $t3         //temp
+#define r12 $t4         //temp
+#define r13 $t5         //temp
+#define r14 $t6         //temp
+#define r15 $t7         //temp
+#define r16 $s0         //saved temp
+#define r17 $s1         //saved temp
+#define r18 $s2         //saved temp
+#define r19 $s3         //saved temp
+#define r20 $s4         //saved temp
+#define r21 $s5         //saved temp
+#define r22 $s6         //saved temp
+#define r23 $s7         //saved temp
+#define r24 $t8         //temp
+#define r25 $t9         //temp
+#define r26 $k0         //kernel
+#define r27 $k1         //kernel
+#define r28 $gp         //global ptr
+#define r29 $sp         //stack ptr
+#define r30 $fp         //frame ptr
+#define r31 $ra         //return addr
 */
 
 
-#define	lq(base, off, reg)	\
-	__asm__ __volatile__ ("lq " #reg ", %0("#base ")" : : "i" (off) )
+#define         lq(base, off, reg)        \
+        __asm__ __volatile__ ("lq " #reg ", %0("#base ")" : : "i" (off) )
 
-#define	lq2(mem, reg)	\
-	__asm__ __volatile__ ("lq " #reg ", %0" : : "r" (mem))
+#define         lq2(mem, reg)        \
+        __asm__ __volatile__ ("lq " #reg ", %0" : : "r" (mem))
 
-#define	sq(reg, off, base)	\
-	__asm__ __volatile__ ("sq " #reg ", %0("#base ")" : : "i" (off) )
+#define         sq(reg, off, base)        \
+        __asm__ __volatile__ ("sq " #reg ", %0("#base ")" : : "i" (off) )
 
 /*
-#define	ld(base, off, reg)	\
-	__asm__ __volatile__ ("ld " #reg ", " #off "("#base ")")
+#define         ld(base, off, reg)        \
+        __asm__ __volatile__ ("ld " #reg ", " #off "("#base ")")
 */
 
-#define	ld3(base, off, reg)	\
-	__asm__ __volatile__ (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
+#define         ld3(base, off, reg)        \
+        __asm__ __volatile__ (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
 
-#define	ldr3(base, off, reg)	\
-	__asm__ __volatile__ (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
+#define         ldr3(base, off, reg)        \
+        __asm__ __volatile__ (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
 
-#define	ldl3(base, off, reg)	\
-	__asm__ __volatile__ (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
+#define         ldl3(base, off, reg)        \
+        __asm__ __volatile__ (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
 
 /*
-#define	sd(reg, off, base)	\
-	__asm__ __volatile__ ("sd " #reg ", " #off "("#base ")")
+#define         sd(reg, off, base)        \
+        __asm__ __volatile__ ("sd " #reg ", " #off "("#base ")")
 */
 //seems assembler has bug encoding mnemonic 'sd', so DIY
-#define	sd3(reg, off, base)	\
-	__asm__ __volatile__ (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
+#define         sd3(reg, off, base)        \
+        __asm__ __volatile__ (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
 
-#define	sw(reg, off, base)	\
-	__asm__ __volatile__ ("sw " #reg ", " #off "("#base ")")
+#define         sw(reg, off, base)        \
+        __asm__ __volatile__ ("sw " #reg ", " #off "("#base ")")
 
-#define	sq2(reg, mem)	\
-	__asm__ __volatile__ ("sq " #reg ", %0" : : "m" (*(mem)))
+#define         sq2(reg, mem)        \
+        __asm__ __volatile__ ("sq " #reg ", %0" : : "m" (*(mem)))
 
-#define	pinth(rs, rt, rd) \
-	__asm__ __volatile__ ("pinth  " #rd ", " #rs ", " #rt )
+#define         pinth(rs, rt, rd) \
+        __asm__ __volatile__ ("pinth  " #rd ", " #rs ", " #rt )
 
-#define	phmadh(rs, rt, rd) \
-	__asm__ __volatile__ ("phmadh " #rd ", " #rs ", " #rt )
+#define         phmadh(rs, rt, rd) \
+        __asm__ __volatile__ ("phmadh " #rd ", " #rs ", " #rt )
 
-#define	pcpyud(rs, rt, rd) \
-	__asm__ __volatile__ ("pcpyud " #rd ", " #rs ", " #rt )
+#define         pcpyud(rs, rt, rd) \
+        __asm__ __volatile__ ("pcpyud " #rd ", " #rs ", " #rt )
 
-#define	pcpyld(rs, rt, rd) \
-	__asm__ __volatile__ ("pcpyld " #rd ", " #rs ", " #rt )
+#define         pcpyld(rs, rt, rd) \
+        __asm__ __volatile__ ("pcpyld " #rd ", " #rs ", " #rt )
 
-#define	pcpyh(rt, rd) \
-	__asm__ __volatile__ ("pcpyh  " #rd ", " #rt )
+#define         pcpyh(rt, rd) \
+        __asm__ __volatile__ ("pcpyh  " #rd ", " #rt )
 
-#define	paddw(rs, rt, rd) \
-	__asm__ __volatile__ ("paddw  " #rd ", " #rs ", " #rt )
+#define         paddw(rs, rt, rd) \
+        __asm__ __volatile__ ("paddw  " #rd ", " #rs ", " #rt )
 
-#define	pextlw(rs, rt, rd) \
-	__asm__ __volatile__ ("pextlw " #rd ", " #rs ", " #rt )
+#define         pextlw(rs, rt, rd) \
+        __asm__ __volatile__ ("pextlw " #rd ", " #rs ", " #rt )
 
-#define	pextuw(rs, rt, rd) \
-	__asm__ __volatile__ ("pextuw " #rd ", " #rs ", " #rt )
+#define         pextuw(rs, rt, rd) \
+        __asm__ __volatile__ ("pextuw " #rd ", " #rs ", " #rt )
 
-#define	pextlh(rs, rt, rd) \
-	__asm__ __volatile__ ("pextlh " #rd ", " #rs ", " #rt )
+#define         pextlh(rs, rt, rd) \
+        __asm__ __volatile__ ("pextlh " #rd ", " #rs ", " #rt )
 
-#define	pextuh(rs, rt, rd) \
-	__asm__ __volatile__ ("pextuh " #rd ", " #rs ", " #rt )
+#define         pextuh(rs, rt, rd) \
+        __asm__ __volatile__ ("pextuh " #rd ", " #rs ", " #rt )
 
-#define	psubw(rs, rt, rd) \
-	__asm__ __volatile__ ("psubw  " #rd ", " #rs ", " #rt )
+#define         psubw(rs, rt, rd) \
+        __asm__ __volatile__ ("psubw  " #rd ", " #rs ", " #rt )
 
-#define	psraw(rt, sa, rd) \
-	__asm__ __volatile__ ("psraw  " #rd ", " #rt ", %0" : : "i"(sa) )
+#define         psraw(rt, sa, rd) \
+        __asm__ __volatile__ ("psraw  " #rd ", " #rt ", %0" : : "i"(sa) )
 
-#define	ppach(rs, rt, rd) \
-	__asm__ __volatile__ ("ppach  " #rd ", " #rs ", " #rt )
+#define         ppach(rs, rt, rd) \
+        __asm__ __volatile__ ("ppach  " #rd ", " #rs ", " #rt )
 
-#define	ppacb(rs, rt, rd) \
-	__asm__ __volatile__ ("ppacb  " #rd ", " #rs ", " #rt )
+#define         ppacb(rs, rt, rd) \
+        __asm__ __volatile__ ("ppacb  " #rd ", " #rs ", " #rt )
 
-#define	prevh(rt, rd) \
-	__asm__ __volatile__ ("prevh  " #rd ", " #rt )
+#define         prevh(rt, rd) \
+        __asm__ __volatile__ ("prevh  " #rd ", " #rt )
 
-#define	pmulth(rs, rt, rd) \
-	__asm__ __volatile__ ("pmulth " #rd ", " #rs ", " #rt )
+#define         pmulth(rs, rt, rd) \
+        __asm__ __volatile__ ("pmulth " #rd ", " #rs ", " #rt )
 
-#define	pmaxh(rs, rt, rd) \
-	__asm__ __volatile__ ("pmaxh " #rd ", " #rs ", " #rt )
+#define         pmaxh(rs, rt, rd) \
+        __asm__ __volatile__ ("pmaxh " #rd ", " #rs ", " #rt )
 
-#define	pminh(rs, rt, rd) \
-	__asm__ __volatile__ ("pminh " #rd ", " #rs ", " #rt )
+#define         pminh(rs, rt, rd) \
+        __asm__ __volatile__ ("pminh " #rd ", " #rs ", " #rt )
 
-#define	pinteh(rs, rt, rd) \
-	__asm__ __volatile__ ("pinteh  " #rd ", " #rs ", " #rt )
+#define         pinteh(rs, rt, rd) \
+        __asm__ __volatile__ ("pinteh  " #rd ", " #rs ", " #rt )
 
-#define	paddh(rs, rt, rd) \
-	__asm__ __volatile__ ("paddh  " #rd ", " #rs ", " #rt )
+#define         paddh(rs, rt, rd) \
+        __asm__ __volatile__ ("paddh  " #rd ", " #rs ", " #rt )
 
-#define	psubh(rs, rt, rd) \
-	__asm__ __volatile__ ("psubh  " #rd ", " #rs ", " #rt )
+#define         psubh(rs, rt, rd) \
+        __asm__ __volatile__ ("psubh  " #rd ", " #rs ", " #rt )
 
-#define	psrah(rt, sa, rd) \
-	__asm__ __volatile__ ("psrah  " #rd ", " #rt ", %0" : : "i"(sa) )
+#define         psrah(rt, sa, rd) \
+        __asm__ __volatile__ ("psrah  " #rd ", " #rt ", %0" : : "i"(sa) )
 
-#define	pmfhl_uw(rd) \
-	__asm__ __volatile__ ("pmfhl.uw  " #rd)
+#define         pmfhl_uw(rd) \
+        __asm__ __volatile__ ("pmfhl.uw  " #rd)
 
-#define	pextlb(rs, rt, rd) \
-	__asm__ __volatile__ ("pextlb  " #rd ", " #rs ", " #rt )
+#define         pextlb(rs, rt, rd) \
+        __asm__ __volatile__ ("pextlb  " #rd ", " #rs ", " #rt )
 
 #endif
 

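[Editor's note] The ld3/ldr3/ldl3/sd3 macros above emit the raw MIPS64 instruction word themselves because, as the comment notes, the assembler mis-encoded the 'sd' mnemonic. The bit layout they build is the standard MIPS I-type format: opcode in bits 31-26, base register in 25-21, target register in 20-16, 16-bit offset in 15-0. A minimal sketch (the helper name is mine, for illustration only) that reproduces the two encodings used by the PUT()/ADD() macros in idct_mmi.c:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Build a MIPS I-type word the same way the ld3/sd3 macros do:
     * opcode | base<<21 | rt<<16 | offset.  0xfc000000 is the 'sd'
     * opcode (0x3f << 26), 0xdc000000 is 'ld' (0x37 << 26). */
    static uint32_t mips_itype(uint32_t opcode, unsigned base, unsigned rt,
                               uint16_t off)
    {
        return opcode | (uint32_t)(base << 21) | (uint32_t)(rt << 16) | off;
    }

    int main(void)
    {
        /* sd3(2, 0, 4) in the PUT() macro: store $2 at 0($4). */
        printf("sd $2, 0($4) -> 0x%08" PRIx32 "\n",
               mips_itype(0xfc000000, 4, 2, 0));
        /* ld3(4, 0, 2) in the ADD() macro: load $2 from 0($4). */
        printf("ld $2, 0($4) -> 0x%08" PRIx32 "\n",
               mips_itype(0xdc000000, 4, 2, 0));
        return 0;
    }
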
Index: mpegvideo_mmi.c
===================================================================
RCS file: /cvsroot/ffmpeg/ffmpeg/libavcodec/ps2/mpegvideo_mmi.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -d -r1.7 -r1.8
--- mpegvideo_mmi.c	17 Dec 2005 18:14:35 -0000	1.7
+++ mpegvideo_mmi.c	22 Dec 2005 01:10:10 -0000	1.8
@@ -41,7 +41,7 @@
                 level = block[0] * s->c_dc_scale;
         }else {
             qadd = 0;
-	    level = block[0];
+            level = block[0];
         }
         nCoeffs= 63; //does not allways use zigzag table
     } else {
@@ -49,29 +49,29 @@
     }
 
     asm volatile(
-        "add    $14, $0, %3	\n\t"
-        "pcpyld $8, %0, %0	\n\t"
-        "pcpyh  $8, $8		\n\t"   //r8 = qmul
-        "pcpyld $9, %1, %1	\n\t"
-        "pcpyh  $9, $9		\n\t"   //r9 = qadd
+        "add    $14, $0, %3     \n\t"
+        "pcpyld $8, %0, %0      \n\t"
+        "pcpyh  $8, $8          \n\t"   //r8 = qmul
+        "pcpyld $9, %1, %1      \n\t"
+        "pcpyh  $9, $9          \n\t"   //r9 = qadd
         ".p2align 2             \n\t"
-        "1:			\n\t"
-        "lq     $10, 0($14)	\n\t"   //r10 = level
-        "addi   $14, $14, 16	\n\t"	//block+=8
-        "addi   %2, %2, -8	\n\t"
-        "pcgth  $11, $0, $10	\n\t"   //r11 = level < 0 ? -1 : 0
-        "pcgth  $12, $10, $0	\n\t"   //r12 = level > 0 ? -1 : 0
-        "por    $12, $11, $12	\n\t"
-        "pmulth $10, $10, $8	\n\t"
-        "paddh  $13, $9, $11	\n\t"
+        "1:                     \n\t"
+        "lq     $10, 0($14)     \n\t"   //r10 = level
+        "addi   $14, $14, 16    \n\t"   //block+=8
+        "addi   %2, %2, -8      \n\t"
+        "pcgth  $11, $0, $10    \n\t"   //r11 = level < 0 ? -1 : 0
+        "pcgth  $12, $10, $0    \n\t"   //r12 = level > 0 ? -1 : 0
+        "por    $12, $11, $12   \n\t"
+        "pmulth $10, $10, $8    \n\t"
+        "paddh  $13, $9, $11    \n\t"
         "pxor   $13, $13, $11   \n\t"   //r13 = level < 0 ? -qadd : qadd
-        "pmfhl.uw $11		\n\t"
-        "pinteh $10, $11, $10	\n\t"   //r10 = level * qmul
-        "paddh  $10, $10, $13	\n\t"
+        "pmfhl.uw $11           \n\t"
+        "pinteh $10, $11, $10   \n\t"   //r10 = level * qmul
+        "paddh  $10, $10, $13   \n\t"
         "pand   $10, $10, $12   \n\t"
-        "sq     $10, -16($14)	\n\t"
-        "bgez   %2, 1b		\n\t"
-	:: "r"(qmul), "r" (qadd), "r" (nCoeffs), "r" (block) : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "memory" );
+        "sq     $10, -16($14)   \n\t"
+        "bgez   %2, 1b          \n\t"
+        :: "r"(qmul), "r" (qadd), "r" (nCoeffs), "r" (block) : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "memory" );
 
     if(s->mb_intra)
         block[0]= level;
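
[Editor's note] For reference, a scalar sketch of what each iteration of the loop above computes per halfword coefficient (the function name is mine; the 16-bit wraparound of the SIMD halfword ops is ignored). The pcgth pair builds the sign and nonzero masks, pmulth/pmfhl.uw/pinteh form level*qmul, paddh/pxor select +qadd or -qadd, and the final pand keeps zero coefficients at zero:

    #include <stdint.h>

    /* Scalar equivalent of one dequantized coefficient. */
    static inline int16_t dequant_coeff(int16_t level, int qmul, int qadd)
    {
        if (level == 0)
            return 0;                    /* pand with the zero mask    */
        if (level < 0)
            return level * qmul - qadd;  /* r13 = -qadd when level < 0 */
        return level * qmul + qadd;      /* r13 = +qadd otherwise      */
    }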




