Browse source

- use c89-style comments
Closes issue #5194
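
Background for the change (an illustrative sketch, not code from this commit): ISO C89/C90 defines only /* ... */ block comments; the // line comment only became standard in C99. uClibc headers are included from user code that may be built in strict C89 mode, so a hypothetical file like the one below, compiled with `gcc -std=c89 -pedantic -c example.c`, shows why the conversion matters (the exact diagnostic wording varies by compiler version):

	/* example.c -- hypothetical, for illustration only */
	int old_way = 0;	// C99/C++ line comment: diagnosed in strict C89 mode
	int new_way = 0;	/* C89 block comment: accepted by any conforming compiler */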

Bernhard Reutner-Fischer 15 years ago
parent
commit
2ba017a2d5
61 changed files with 1062 additions and 1060 deletions
   1. include/fcntl.h (+1 -1)
   2. include/libc-symbols.h (+1 -1)
   3. ldso/ldso/arm/dl-startup.h (+1 -1)
   4. ldso/ldso/powerpc/resolve.S (+7 -7)
   5. libc/inet/rpc/rpc_thread.c (+1 -1)
   6. libc/misc/internals/tempname.c (+1 -1)
   7. libc/string/bfin/memchr.S (+2 -2)
   8. libc/string/bfin/strcmp.S (+40 -40)
   9. libc/string/generic/strtok_r.c (+7 -7)
  10. libc/string/ia64/bzero.S (+68 -68)
  11. libc/string/ia64/memccpy.S (+51 -51)
  12. libc/string/ia64/memchr.S (+16 -16)
  13. libc/string/ia64/memcmp.S (+47 -47)
  14. libc/string/ia64/memcpy.S (+80 -80)
  15. libc/string/ia64/memmove.S (+85 -85)
  16. libc/string/ia64/memset.S (+89 -89)
  17. libc/string/ia64/strchr.S (+16 -16)
  18. libc/string/ia64/strcmp.S (+1 -1)
  19. libc/string/ia64/strcpy.S (+33 -33)
  20. libc/string/ia64/strlen.S (+9 -9)
  21. libc/string/ia64/strncmp.S (+5 -5)
  22. libc/string/ia64/strncpy.S (+45 -45)
  23. libc/string/sh64/memcpy.S (+1 -1)
  24. libc/string/sh64/memset.S (+15 -14)
  25. libc/string/sh64/strcpy.S (+14 -14)
  26. libc/string/xtensa/memcpy.S (+11 -11)
  27. libc/string/xtensa/memset.S (+6 -6)
  28. libc/string/xtensa/strcmp.S (+74 -74)
  29. libc/string/xtensa/strcpy.S (+36 -36)
  30. libc/string/xtensa/strlen.S (+28 -28)
  31. libc/string/xtensa/strncpy.S (+75 -75)
  32. libc/sysdeps/linux/bfin/__longjmp.S (+13 -13)
  33. libc/sysdeps/linux/bfin/bsd-_setjmp.S (+11 -11)
  34. libc/sysdeps/linux/common/bits/uClibc_errno.h (+1 -1)
  35. libc/sysdeps/linux/common/llseek.c (+1 -1)
  36. libc/sysdeps/linux/e1/crt1.c (+1 -1)
  37. libc/sysdeps/linux/ia64/__longjmp.S (+37 -37)
  38. libc/sysdeps/linux/ia64/setjmp.S (+22 -22)
  39. libc/sysdeps/linux/microblaze/__longjmp.S (+8 -8)
  40. libc/sysdeps/linux/microblaze/crt0.S (+9 -9)
  41. libc/sysdeps/linux/microblaze/vfork.S (+6 -6)
  42. libc/sysdeps/linux/nios/bits/endian.h (+1 -1)
  43. libc/sysdeps/linux/nios/crt1.S (+3 -3)
  44. libc/sysdeps/linux/sh/clone.S (+3 -3)
  45. libc/sysdeps/linux/sh/vfork.S (+8 -8)
  46. libc/sysdeps/linux/sparc/qp_ops.c (+1 -1)
  47. libc/sysdeps/linux/v850/__longjmp.S (+2 -2)
  48. libc/sysdeps/linux/v850/crt0.S (+10 -10)
  49. libc/sysdeps/linux/v850/vfork.S (+4 -4)
  50. libc/sysdeps/linux/xtensa/__longjmp.S (+3 -3)
  51. libc/sysdeps/linux/xtensa/setjmp.S (+3 -3)
  52. libc/sysdeps/linux/xtensa/vfork.S (+15 -15)
  53. libc/sysdeps/linux/xtensa/windowspill.S (+13 -13)
  54. libm/e_gamma.c (+1 -1)
  55. libm/e_gamma_r.c (+1 -1)
  56. libm/e_lgamma.c (+1 -1)
  57. libpthread/linuxthreads.old/forward.c (+6 -5)
  58. libpthread/linuxthreads.old/pthread.c (+3 -3)
  59. libpthread/linuxthreads/pthread.c (+2 -2)
  60. libpthread/linuxthreads/sysdeps/unix/sysv/linux/sh/vfork.S (+3 -3)
  61. utils/ldd.c (+4 -4)

+ 1 - 1
include/fcntl.h

@@ -199,7 +199,7 @@ extern int posix_fadvise64 (int __fd, __off64_t __offset, __off64_t __len,
 
 #endif
 
-#if 0 // && defined __UCLIBC_HAS_ADVANCED_REALTIME__
+#if 0 /* && defined __UCLIBC_HAS_ADVANCED_REALTIME__ */
 
 /* FIXME -- uClibc should probably implement these... */
 

+ 1 - 1
include/libc-symbols.h

@@ -283,7 +283,7 @@
 
 /* Tacking on "\n#APP\n\t#" to the section name makes gcc put it's bogus
    section attributes on what looks like a comment to the assembler.  */
-#ifdef __sparc__ //HAVE_SECTION_QUOTES
+#ifdef __sparc__ /* HAVE_SECTION_QUOTES */
 # define __sec_comment "\"\n#APP\n\t#\""
 #else
 # define __sec_comment "\n#APP\n\t#"

+ 1 - 1
ldso/ldso/arm/dl-startup.h

@@ -159,7 +159,7 @@ void PERFORM_BOOTSTRAP_RELOC(ELF_RELOC *rpnt, unsigned long *reloc_addr,
 				if (topbits != 0xfe000000 && topbits != 0x00000000)
 				{
 #if 0
-					// Don't bother with this during ldso initilization...
+					/* Don't bother with this during ldso initilization... */
 					newvalue = fix_bad_pc24(reloc_addr, symbol_addr)
 						- (unsigned long)reloc_addr + (addend << 2);
 					topbits = newvalue & 0xfe000000;

+ 7 - 7
ldso/ldso/powerpc/resolve.S

@@ -11,19 +11,19 @@
 .type	_dl_linux_resolve,@function
 
 _dl_linux_resolve:
-// We need to save the registers used to pass parameters, and register 0,
-// which is used by _mcount; the registers are saved in a stack frame.
+/* We need to save the registers used to pass parameters, and register 0,
+   which is used by _mcount; the registers are saved in a stack frame.  */
 	stwu 1,-64(1)
 	stw 0,12(1)
 	stw 3,16(1)
 	stw 4,20(1)
-// The code that calls this has put parameters for 'fixup' in r12 and r11.
+/* The code that calls this has put parameters for 'fixup' in r12 and r11. */
 	mr 3,12
 	stw 5,24(1)
 	mr 4,11
 	stw 6,28(1)
 	mflr 0
-// We also need to save some of the condition register fields.
+/* We also need to save some of the condition register fields. */
 	stw 7,32(1)
 	stw 0,48(1)
 	stw 8,36(1)
@@ -32,9 +32,9 @@ _dl_linux_resolve:
 	stw 10,44(1)
 	stw 0,8(1)
 	bl _dl_linux_resolver@local
-// 'fixup' returns the address we want to branch to.
+/* 'fixup' returns the address we want to branch to. */
 	mtctr 3
-// Put the registers back...
+/* Put the registers back... */
 	lwz 0,48(1)
 	lwz 10,44(1)
 	lwz 9,40(1)
@@ -48,7 +48,7 @@ _dl_linux_resolve:
 	lwz 4,20(1)
 	lwz 3,16(1)
 	lwz 0,12(1)
-// ...unwind the stack frame, and jump to the PLT entry we updated.
+/* ...unwind the stack frame, and jump to the PLT entry we updated. */
 	addi 1,1,64
 	bctr
 

+ 1 - 1
libc/inet/rpc/rpc_thread.c

@@ -34,7 +34,7 @@ __rpc_thread_destroy (void)
 	if (tvp != NULL && tvp != &__libc_tsd_RPC_VARS_mem) {
 		__rpc_thread_svc_cleanup ();
 		__rpc_thread_clnt_cleanup ();
-		//__rpc_thread_key_cleanup ();
+		/*__rpc_thread_key_cleanup (); */
 		free (tvp->authnone_private_s);
 		free (tvp->clnt_perr_buf_s);
 		free (tvp->clntraw_private_s);

+ 1 - 1
libc/misc/internals/tempname.c

@@ -75,7 +75,7 @@ static int direxists (const char *dir)
 int attribute_hidden ___path_search (char *tmpl, size_t tmpl_len, const char *dir,
 	const char *pfx /*, int try_tmpdir*/)
 {
-    //const char *d;
+    /*const char *d; */
     size_t dlen, plen;
 
     if (!pfx || !pfx[0])

+ 2 - 2
libc/string/bfin/memchr.S

@@ -25,8 +25,8 @@
 
 .weak _memchr
 ENTRY(_memchr)
-	P0 = R0;             // P0 = address
-	P2 = R2;             // P2 = count
+	P0 = R0;             /* P0 = address */
+	P2 = R2;             /* P2 = count */
 	R1 = R1.B(Z);
 	CC = R2 == 0;
 	IF CC JUMP .Lfailed;

+ 40 - 40
libc/string/bfin/strcmp.S

@@ -29,66 +29,66 @@ ENTRY(_strcmp)
 	p1 = r0;
 	p2 = r1;
 
-	p0 = -1;	// (need for loop counter init)
+	p0 = -1;	/* (need for loop counter init) */
 
-	  // check if byte aligned
-	r0 = r0 | r1;	// check both pointers at same time
-	r0 <<= 30;	// dump all but last 2 bits
-	cc = az;	// are they zero?
-	if !cc jump .Lunaligned;	// no; use unaligned code.
-			// fall-thru for aligned case..
+	  /* check if byte aligned */
+	r0 = r0 | r1;	/* check both pointers at same time */
+	r0 <<= 30;	/* dump all but last 2 bits */
+	cc = az;	/* are they zero? */
+	if !cc jump .Lunaligned;	/* no; use unaligned code. */
+			/* fall-thru for aligned case.. */
 
-	  // note that r0 is zero from the previous...
-	  //           p0 set to -1
+	  /* note that r0 is zero from the previous... */
+	  /*           p0 set to -1 */
 
 	LSETUP (.Lbeginloop, .Lendloop) lc0=p0;
-	  // pick up first words
+	  /* pick up first words */
 	r1 = [p1++];
 	r2 = [p2++];
-	  // make up mask:  0FF0FF
+	  /* make up mask:  0FF0FF */
 	r7 = 0xFF;
 	r7.h = 0xFF;
-		// loop : 9 cycles to check 4 characters
+		/* loop : 9 cycles to check 4 characters */
 	cc = r1 == r2;
 .Lbeginloop:
-	if !cc jump .Lnotequal4;	// compare failure, exit loop
+	if !cc jump .Lnotequal4;	/* compare failure, exit loop */
 
-	  // starting with   44332211
-	  // see if char 3 or char 1 is 0
-	r3 = r1 & r7;		// form 00330011
-	  // add to zero, and (r2 is free, reload)
+	  /* starting with   44332211 */
+	  /* see if char 3 or char 1 is 0 */
+	r3 = r1 & r7;		/* form 00330011 */
+	  /* add to zero, and (r2 is free, reload) */
 	r6 = r3 +|+ r0 || r2 = [p2++] || nop;
-	cc = az;	// true if either is zero
-	r3 = r1 ^ r3;	        // form 44002200 (4321^0301 => 4020)
-				// (trick, saves having another mask)
-	// add to zero,  and  (r1 is free, reload)
+	cc = az;	/* true if either is zero */
+	r3 = r1 ^ r3;	        /* form 44002200 (4321^0301 => 4020) */
+				/* (trick, saves having another mask) */
+	/* add to zero,  and  (r1 is free, reload) */
 	r6 = r3 +|+ r0 || r1 = [p1++] || nop;
-	cc |= az;	// true if either is zero
-	if cc jump .Lzero4;	// leave if a zero somewhere
+	cc |= az;	/* true if either is zero */
+	if cc jump .Lzero4;	/* leave if a zero somewhere */
 .Lendloop:
 	cc = r1 == r2;
 
- // loop exits
-.Lnotequal4:		// compare failure on 4-char compare
-			// address pointers are one word ahead;
-			// faster to use zero4 exit code
+ /* loop exits */
+.Lnotequal4:		/* compare failure on 4-char compare */
+			/* address pointers are one word ahead; */
+			/* faster to use zero4 exit code */
 	p1 += 4;
 	p2 += 4;
 
-.Lzero4:			// one of the bytes in word 1 is zero
-			// but we've already fetched the next word; so
-			// backup two to look at failing word again
+.Lzero4:			/* one of the bytes in word 1 is zero */
+			/* but we've already fetched the next word; so */
+			/* backup two to look at failing word again */
 	p1 += -8;
 	p2 += -8;
 
 
 
-		// here when pointers are unaligned: checks one
-		// character at a time.  Also use at the end of
-		// the word-check algorithm to figure out what happened
+		/* here when pointers are unaligned: checks one */
+		/* character at a time.  Also use at the end of */
+		/* the word-check algorithm to figure out what happened */
 .Lunaligned:
-	  //	R0 is non-zero from before.
-	  //           p0 set to -1
+	  /*	R0 is non-zero from before. */
+	  /*           p0 set to -1 */
 
 	r0 = 0 (Z);
 	r1 = B[p1++] (Z);
@@ -96,18 +96,18 @@ ENTRY(_strcmp)
 	LSETUP (.Lbeginloop1, .Lendloop1) lc0=p0;
 
 .Lbeginloop1:
-	cc = r1;	// first char must be non-zero
-	// chars must be the same
+	cc = r1;	/* first char must be non-zero */
+	/* chars must be the same */
 	r3 = r2 - r1 (NS) || r1 = B[p1++] (Z) || nop;
 	cc &= az;
-	r3 = r0 - r2;	// second char must be non-zero
+	r3 = r0 - r2;	/* second char must be non-zero */
 	cc &= an;
 	if !cc jump .Lexitloop1;
 .Lendloop1:
 	r2 = B[p2++] (Z);
 
-.Lexitloop1: // here means we found a zero or a difference.
-	   // we have r2(N), p2(N), r1(N+1), p1(N+2)
+.Lexitloop1: /* here means we found a zero or a difference. */
+	   /* we have r2(N), p2(N), r1(N+1), p1(N+2) */
 	r1=B[p1+ -2] (Z);
 	r0 = r1 - r2;
 	(r7:4) = [sp++];

+ 7 - 7
libc/string/generic/strtok_r.c

@@ -29,17 +29,17 @@
 # define __rawmemchr strchr
 /* Experimentally off - libc_hidden_proto(strchr) */
 #endif
-
-/* Parse S into tokens separated by characters in DELIM.
+#if 0
+   Parse S into tokens separated by characters in DELIM.
    If S is NULL, the saved pointer in SAVE_PTR is used as
    the next starting point.  For example:
 	char s[] = "-abc-=-def";
 	char *sp;
-	x = strtok_r(s, "-", &sp);	// x = "abc", sp = "=-def"
-	x = strtok_r(NULL, "-=", &sp);	// x = "def", sp = NULL
-	x = strtok_r(NULL, "=", &sp);	// x = NULL
-		// s = "abc\0-def\0"
-*/
+	x = strtok_r(s, "-", &sp);	/* x = "abc", sp = "=-def" */
+	x = strtok_r(NULL, "-=", &sp);	/* x = "def", sp = NULL */
+	x = strtok_r(NULL, "=", &sp);	/* x = NULL */
+		/* s = "abc\0-def\0" */
+#endif
 char *strtok_r (char *s, const char *delim, char **save_ptr)
 {
   char *token;

+ 68 - 68
libc/string/ia64/bzero.S

@@ -47,13 +47,13 @@
 #define ptr1		r28
 #define ptr2		r27
 #define ptr3		r26
-#define ptr9 		r24
+#define ptr9		r24
 #define	loopcnt		r23
 #define linecnt		r22
 #define bytecnt		r21
 
-// This routine uses only scratch predicate registers (p6 - p15)
-#define p_scr		p6	// default register for same-cycle branches
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr		p6	/* default register for same-cycle branches */
 #define p_unalgn	p9
 #define p_y		p11
 #define p_n		p12
@@ -65,7 +65,7 @@
 #define MIN1		15
 #define MIN1P1HALF	8
 #define LINE_SIZE	128
-#define LSIZE_SH        7			// shift amount
+#define LSIZE_SH        7			/* shift amount */
 #define PREF_AHEAD	8
 
 #define USE_FLP
@@ -87,49 +87,49 @@ ENTRY(bzero)
 	movi0	save_lc = ar.lc
 } { .mmi
 	.body
-	mov	ret0 = dest		// return value
+	mov	ret0 = dest		/* return value */
 	nop.m	0
 	cmp.eq	p_scr, p0 = cnt, r0
 ;; }
 { .mmi
-	and	ptr2 = -(MIN1+1), dest	// aligned address
-	and	tmp = MIN1, dest	// prepare to check for alignment
-	tbit.nz p_y, p_n = dest, 0	// Do we have an odd address? (M_B_U)
+	and	ptr2 = -(MIN1+1), dest	/* aligned address */
+	and	tmp = MIN1, dest	/* prepare to check for alignment */
+	tbit.nz p_y, p_n = dest, 0	/* Do we have an odd address? (M_B_U) */
 } { .mib
 	mov	ptr1 = dest
 	nop.i	0
-(p_scr)	br.ret.dpnt.many rp		// return immediately if count = 0
+(p_scr)	br.ret.dpnt.many rp		/* return immediately if count = 0 */
 ;; }
 { .mib
 	cmp.ne	p_unalgn, p0 = tmp, r0
-} { .mib					// NB: # of bytes to move is 1
-	sub	bytecnt = (MIN1+1), tmp		//     higher than loopcnt
-	cmp.gt	p_scr, p0 = 16, cnt		// is it a minimalistic task?
-(p_scr)	br.cond.dptk.many .move_bytes_unaligned	// go move just a few (M_B_U)
+} { .mib					/* NB: # of bytes to move is 1 */
+	sub	bytecnt = (MIN1+1), tmp		/*     higher than loopcnt */
+	cmp.gt	p_scr, p0 = 16, cnt		/* is it a minimalistic task? */
+(p_scr)	br.cond.dptk.many .move_bytes_unaligned	/* go move just a few (M_B_U) */
 ;; }
 { .mmi
-(p_unalgn) add	ptr1 = (MIN1+1), ptr2		// after alignment
-(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		// after alignment
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	// should we do a st8 ?
+(p_unalgn) add	ptr1 = (MIN1+1), ptr2		/* after alignment */
+(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		/* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	/* should we do a st8 ? */
 ;; }
 { .mib
 (p_y)	add	cnt = -8, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	// should we do a st4 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	/* should we do a st4 ? */
 } { .mib
 (p_y)	st8	[ptr2] = r0,-4
 (p_n)	add	ptr2 = 4, ptr2
 ;; }
 { .mib
 (p_yy)	add	cnt = -4, cnt
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	// should we do a st2 ?
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	/* should we do a st2 ? */
 } { .mib
 (p_yy)	st4	[ptr2] = r0,-2
 (p_nn)	add	ptr2 = 2, ptr2
 ;; }
 { .mmi
-	mov	tmp = LINE_SIZE+1		// for compare
+	mov	tmp = LINE_SIZE+1		/* for compare */
 (p_y)	add	cnt = -2, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	// should we do a st1 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	/* should we do a st1 ? */
 } { .mmi
 	nop.m	0
 (p_y)	st2	[ptr2] = r0,-1
@@ -138,44 +138,44 @@ ENTRY(bzero)
 
 { .mmi
 (p_yy)	st1	[ptr2] = r0
-  	cmp.gt	p_scr, p0 = tmp, cnt		// is it a minimalistic task?
+	cmp.gt	p_scr, p0 = tmp, cnt		/* is it a minimalistic task? */
 } { .mbb
 (p_yy)	add	cnt = -1, cnt
-(p_scr)	br.cond.dpnt.many .fraction_of_line	// go move just a few
+(p_scr)	br.cond.dpnt.many .fraction_of_line	/* go move just a few */
 ;; }
 { .mib
-	nop.m 	0
+	nop.m	0
 	shr.u	linecnt = cnt, LSIZE_SH
 	nop.b	0
 ;; }
 
 	.align 32
-.l1b:	// ------------------//  L1B: store ahead into cache lines; fill later
+.l1b:	/* ------------------  L1B: store ahead into cache lines; fill later */
 { .mmi
-	and	tmp = -(LINE_SIZE), cnt		// compute end of range
-	mov	ptr9 = ptr1			// used for prefetching
-	and	cnt = (LINE_SIZE-1), cnt	// remainder
+	and	tmp = -(LINE_SIZE), cnt		/* compute end of range */
+	mov	ptr9 = ptr1			/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
 } { .mmi
-	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
-	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
+	mov	loopcnt = PREF_AHEAD-1		/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
 ;; }
 { .mmi
 (p_scr)	add	loopcnt = -1, linecnt
-	add	ptr2 = 16, ptr1	// start of stores (beyond prefetch stores)
-	add	ptr1 = tmp, ptr1	// first address beyond total range
+	add	ptr2 = 16, ptr1	/* start of stores (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1	/* first address beyond total range */
 ;; }
 { .mmi
-	add	tmp = -1, linecnt	// next loop count
+	add	tmp = -1, linecnt	/* next loop count */
 	movi0	ar.lc = loopcnt
 ;; }
 .pref_l1b:
 { .mib
-	stf.spill [ptr9] = f0, 128	// Do stores one cache line apart
+	stf.spill [ptr9] = f0, 128	/* Do stores one cache line apart */
 	nop.i   0
 	br.cloop.dptk.few .pref_l1b
 ;; }
 { .mmi
-	add	ptr0 = 16, ptr2		// Two stores in parallel
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
 	movi0	ar.lc = tmp
 ;; }
 .l1bx:
@@ -190,7 +190,7 @@ ENTRY(bzero)
  { .mmi
 	stf.spill [ptr2] = f0, 32
 	stf.spill [ptr0] = f0, 64
- 	cmp.lt	p_scr, p0 = ptr9, ptr1	// do we need more prefetching?
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
  ;; }
 { .mmb
 	stf.spill [ptr2] = f0, 32
@@ -198,14 +198,14 @@ ENTRY(bzero)
 	br.cloop.dptk.few .l1bx
 ;; }
 { .mib
-	cmp.gt  p_scr, p0 = 8, cnt	// just a few bytes left ?
+	cmp.gt  p_scr, p0 = 8, cnt	/* just a few bytes left ? */
 (p_scr)	br.cond.dpnt.many  .move_bytes_from_alignment
 ;; }
 
 .fraction_of_line:
 { .mib
 	add	ptr2 = 16, ptr1
-	shr.u	loopcnt = cnt, 5   	// loopcnt = cnt / 32
+	shr.u	loopcnt = cnt, 5	/* loopcnt = cnt / 32 */
 ;; }
 { .mib
 	cmp.eq	p_scr, p0 = loopcnt, r0
@@ -213,11 +213,11 @@ ENTRY(bzero)
 (p_scr)	br.cond.dpnt.many .store_words
 ;; }
 { .mib
-	and	cnt = 0x1f, cnt		// compute the remaining cnt
+	and	cnt = 0x1f, cnt		/* compute the remaining cnt */
 	movi0   ar.lc = loopcnt
 ;; }
 	.align 32
-.l2:	// -----------------------------//  L2A:  store 32B in 2 cycles
+.l2:	/* -----------------------------  L2A:  store 32B in 2 cycles */
 { .mmb
 	store	[ptr1] = myval, 8
 	store	[ptr2] = myval, 8
@@ -228,38 +228,38 @@ ENTRY(bzero)
 ;; }
 .store_words:
 { .mib
-	cmp.gt	p_scr, p0 = 8, cnt	// just a few bytes left ?
-(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	// Branch
+	cmp.gt	p_scr, p0 = 8, cnt	/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	/* Branch */
 ;; }
 
 { .mmi
-	store	[ptr1] = myval, 8	// store
-	cmp.le	p_y, p_n = 16, cnt	//
-	add	cnt = -8, cnt		// subtract
+	store	[ptr1] = myval, 8	/* store */
+	cmp.le	p_y, p_n = 16, cnt	/* */
+	add	cnt = -8, cnt		/* subtract */
 ;; }
 { .mmi
-(p_y)	store	[ptr1] = myval, 8	// store
+(p_y)	store	[ptr1] = myval, 8	/* store */
 (p_y)	cmp.le.unc p_yy, p_nn = 16, cnt
-(p_y)	add	cnt = -8, cnt		// subtract
+(p_y)	add	cnt = -8, cnt		/* subtract */
 ;; }
-{ .mmi					// store
+{ .mmi					/* store */
 (p_yy)	store	[ptr1] = myval, 8
-(p_yy)	add	cnt = -8, cnt		// subtract
+(p_yy)	add	cnt = -8, cnt		/* subtract */
 ;; }
 
 .move_bytes_from_alignment:
 { .mib
 	cmp.eq	p_scr, p0 = cnt, r0
-	tbit.nz.unc p_y, p0 = cnt, 2	// should we terminate with a st4 ?
+	tbit.nz.unc p_y, p0 = cnt, 2	/* should we terminate with a st4 ? */
 (p_scr)	br.cond.dpnt.few .restore_and_exit
 ;; }
 { .mib
 (p_y)	st4	[ptr1] = r0,4
-	tbit.nz.unc p_yy, p0 = cnt, 1	// should we terminate with a st2 ?
+	tbit.nz.unc p_yy, p0 = cnt, 1	/* should we terminate with a st2 ? */
 ;; }
 { .mib
 (p_yy)	st2	[ptr1] = r0,2
-	tbit.nz.unc p_y, p0 = cnt, 0	// should we terminate with a st1 ?
+	tbit.nz.unc p_y, p0 = cnt, 0	/* should we terminate with a st1 ? */
 ;; }
 
 { .mib
@@ -281,38 +281,38 @@ ENTRY(bzero)
 (p_n)	add	ptr2 = 2, ptr1
 } { .mmi
 (p_y)	add	ptr2 = 3, ptr1
-(p_y)	st1	[ptr1] = r0, 1		// fill 1 (odd-aligned) byte
-(p_y)	add	cnt = -1, cnt		// [15, 14 (or less) left]
+(p_y)	st1	[ptr1] = r0, 1		/* fill 1 (odd-aligned) byte */
+(p_y)	add	cnt = -1, cnt		/* [15, 14 (or less) left] */
 ;; }
 { .mmi
 (p_yy)	cmp.le.unc p_y, p0 = 8, cnt
-	add	ptr3 = ptr1, cnt	// prepare last store
+	add	ptr3 = ptr1, cnt	/* prepare last store */
 	movi0	ar.lc = save_lc
 } { .mmi
-(p_yy)	st2	[ptr1] = r0, 4		// fill 2 (aligned) bytes
-(p_yy)	st2	[ptr2] = r0, 4		// fill 2 (aligned) bytes
-(p_yy)	add	cnt = -4, cnt		// [11, 10 (o less) left]
+(p_yy)	st2	[ptr1] = r0, 4		/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = r0, 4		/* fill 2 (aligned) bytes */
+(p_yy)	add	cnt = -4, cnt		/* [11, 10 (o less) left] */
 ;; }
 { .mmi
 (p_y)	cmp.le.unc p_yy, p0 = 8, cnt
-	add	ptr3 = -1, ptr3		// last store
-	tbit.nz p_scr, p0 = cnt, 1	// will there be a st2 at the end ?
+	add	ptr3 = -1, ptr3		/* last store */
+	tbit.nz p_scr, p0 = cnt, 1	/* will there be a st2 at the end ? */
 } { .mmi
-(p_y)	st2	[ptr1] = r0, 4		// fill 2 (aligned) bytes
-(p_y)	st2	[ptr2] = r0, 4		// fill 2 (aligned) bytes
-(p_y)	add	cnt = -4, cnt		// [7, 6 (or less) left]
+(p_y)	st2	[ptr1] = r0, 4		/* fill 2 (aligned) bytes */
+(p_y)	st2	[ptr2] = r0, 4		/* fill 2 (aligned) bytes */
+(p_y)	add	cnt = -4, cnt		/* [7, 6 (or less) left] */
 ;; }
 { .mmi
-(p_yy)	st2	[ptr1] = r0, 4		// fill 2 (aligned) bytes
-(p_yy)	st2	[ptr2] = r0, 4		// fill 2 (aligned) bytes
-					// [3, 2 (or less) left]
-	tbit.nz p_y, p0 = cnt, 0	// will there be a st1 at the end ?
+(p_yy)	st2	[ptr1] = r0, 4		/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = r0, 4		/* fill 2 (aligned) bytes */
+					/* [3, 2 (or less) left] */
+	tbit.nz p_y, p0 = cnt, 0	/* will there be a st1 at the end ? */
 } { .mmi
 (p_yy)	add	cnt = -4, cnt
 ;; }
 { .mmb
-(p_scr)	st2	[ptr1] = r0		// fill 2 (aligned) bytes
-(p_y)	st1	[ptr3] = r0		// fill last byte (using ptr3)
+(p_scr)	st2	[ptr1] = r0		/* fill 2 (aligned) bytes */
+(p_y)	st1	[ptr3] = r0		/* fill last byte (using ptr3) */
 	br.ret.sptk.many rp
 ;; }
 END(bzero)

+ 51 - 51
libc/string/ia64/memccpy.S

@@ -23,7 +23,7 @@
    Inputs:
         in0:    dest
         in1:    src
-  	in2:	char
+	in2:	char
         in3:    byte count
 
    This implementation assumes little endian mode (UM.be = 0).
@@ -69,75 +69,75 @@ ENTRY(memccpy)
 	.rotr	r[MEMLAT + 7], tmp1[4], tmp2[4], val[4], tmp3[2], pos0[2]
 	.rotp	p[MEMLAT + 6 + 1]
 
-	mov	ret0 = r0		// return NULL if no match
+	mov	ret0 = r0		/* return NULL if no match */
 	.save pr, saved_pr
-	mov	saved_pr = pr		// save the predicate registers
-	mov 	dest = in0		// dest
+	mov	saved_pr = pr		/* save the predicate registers */
+	mov 	dest = in0		/* dest */
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc	// save the loop counter
-        mov 	saved_ec = ar.ec	// save the loop counter
+        mov 	saved_lc = ar.lc	/* save the loop counter */
+        mov 	saved_ec = ar.ec	/* save the loop counter */
 	.body
-	mov 	src = in1		// src
-	extr.u	char = in2, 0, 8	// char
-	mov	len = in3		// len
-	sub	tmp = r0, in0		// tmp = -dest
-	cmp.ne	p7, p0 = r0, r0		// clear p7
+	mov 	src = in1		/* src */
+	extr.u	char = in2, 0, 8	/* char */
+	mov	len = in3		/* len */
+	sub	tmp = r0, in0		/* tmp = -dest */
+	cmp.ne	p7, p0 = r0, r0		/* clear p7 */
 	;;
-	and	loopcnt = 7, tmp	// loopcnt = -dest % 8
-	cmp.ge	p6, p0 = OP_T_THRES, len	// is len <= OP_T_THRES
-	mov	ar.ec = 0		// ec not guaranteed zero on entry
-(p6)	br.cond.spnt	.cpyfew		// copy byte by byte
+	and	loopcnt = 7, tmp	/* loopcnt = -dest % 8 */
+	cmp.ge	p6, p0 = OP_T_THRES, len	/* is len <= OP_T_THRES */
+	mov	ar.ec = 0		/* ec not guaranteed zero on entry */
+(p6)	br.cond.spnt	.cpyfew		/* copy byte by byte */
 	;;
 	cmp.eq	p6, p0 = loopcnt, r0
 	mux1	charx8 = char, @brcst
 (p6)	br.cond.sptk .dest_aligned
-	sub	len = len, loopcnt	// len -= -dest % 8
-	adds	loopcnt = -1, loopcnt	// --loopcnt
+	sub	len = len, loopcnt	/* len -= -dest % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
 	;;
 	mov	ar.lc = loopcnt
-.l1:					// copy -dest % 8 bytes
-	ld1	value = [src], 1	// value = *src++
+.l1:					/* copy -dest % 8 bytes */
+	ld1	value = [src], 1	/* value = *src++ */
 	;;
-	st1	[dest] = value, 1	// *dest++ = value
+	st1	[dest] = value, 1	/* *dest++ = value */
 	cmp.eq	p6, p0 = value, char
 (p6)	br.cond.spnt .foundit
 	br.cloop.dptk .l1
 .dest_aligned:
-	and	sh1 = 7, src 		// sh1 = src % 8
-	and	tmp = -8, len   	// tmp = len & -OPSIZ
-	and	asrc = -8, src		// asrc = src & -OPSIZ  -- align src
-	shr.u	loopcnt = len, 3	// loopcnt = len / 8
-	and	len = 7, len ;;		// len = len % 8
-	shl	sh1 = sh1, 3		// sh1 = 8 * (src % 8)
-	adds	loopcnt = -1, loopcnt	// --loopcnt
-	mov     pr.rot = 1 << 16 ;;	// set rotating predicates
-	sub	sh2 = 64, sh1		// sh2 = 64 - sh1
-	mov	ar.lc = loopcnt		// set LC
-	cmp.eq  p6, p0 = sh1, r0 	// is the src aligned?
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	and	tmp = -8, len   	/* tmp = len & -OPSIZ */
+	and	asrc = -8, src		/* asrc = src & -OPSIZ  -- align src */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and	len = 7, len ;;		/* len = len % 8 */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	mov     pr.rot = 1 << 16 ;;	/* set rotating predicates */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	mov	ar.lc = loopcnt		/* set LC */
+	cmp.eq  p6, p0 = sh1, r0 	/* is the src aligned? */
 (p6)    br.cond.sptk .src_aligned ;;
-	add	src = src, tmp		// src += len & -OPSIZ
-	mov	ar.ec = MEMLAT + 6 + 1 	// six more passes needed
-	ld8	r[1] = [asrc], 8 	// r[1] = w0
-	cmp.ne	p6, p0 = r0, r0	;;	// clear p6
+	add	src = src, tmp		/* src += len & -OPSIZ */
+	mov	ar.ec = MEMLAT + 6 + 1 	/* six more passes needed */
+	ld8	r[1] = [asrc], 8 	/* r[1] = w0 */
+	cmp.ne	p6, p0 = r0, r0	;;	/* clear p6 */
 	ALIGN(32)
 .l2:
-(p[0])		ld8.s	r[0] = [asrc], 8		// r[0] = w1
-(p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	// tmp1 = w0 >> sh1
-(p[MEMLAT])	shl	tmp2[0] = r[0 + MEMLAT], sh2  	// tmp2 = w1 << sh2
+(p[0])		ld8.s	r[0] = [asrc], 8		/* r[0] = w1 */
+(p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	/* tmp1 = w0 >> sh1 */
+(p[MEMLAT])	shl	tmp2[0] = r[0 + MEMLAT], sh2  	/* tmp2 = w1 << sh2 */
 (p[MEMLAT+4])	xor	tmp3[0] = val[1], charx8
 (p[MEMLAT+5])	czx1.r	pos0[0] = tmp3[1]
-(p[MEMLAT+6])	chk.s	r[6 + MEMLAT], .recovery1	// our data isn't
-							// valid - rollback!
+(p[MEMLAT+6])	chk.s	r[6 + MEMLAT], .recovery1	/* our data isn't */
+							/* valid - rollback! */
 (p[MEMLAT+6])	cmp.ne	p6, p0 = 8, pos0[1]
 (p6)		br.cond.spnt	.gotit
-(p[MEMLAT+6])	st8	[dest] = val[3], 8		// store val to dest
-(p[MEMLAT+3])	or	val[0] = tmp1[3], tmp2[3] 	// val = tmp1 | tmp2
+(p[MEMLAT+6])	st8	[dest] = val[3], 8		/* store val to dest */
+(p[MEMLAT+3])	or	val[0] = tmp1[3], tmp2[3] 	/* val = tmp1 | tmp2 */
 		br.ctop.sptk    .l2
 		br.cond.sptk .cpyfew
 
 .src_aligned:
-		cmp.ne  p6, p0 = r0, r0			// clear p6
-		mov     ar.ec = MEMLAT + 2 + 1 ;;	// set EC
+		cmp.ne  p6, p0 = r0, r0			/* clear p6 */
+		mov     ar.ec = MEMLAT + 2 + 1 ;;	/* set EC */
 .l3:
 (p[0])		ld8.s	r[0] = [src], 8
 (p[MEMLAT])	xor	tmp3[0] = r[MEMLAT], charx8
@@ -149,8 +149,8 @@ ENTRY(memccpy)
 (p[MEMLAT+2])	st8	[dest] = r[MEMLAT+2], 8
 		br.ctop.dptk .l3
 .cpyfew:
-	cmp.eq	p6, p0 = len, r0	// is len == 0 ?
-	adds	len = -1, len		// --len;
+	cmp.eq	p6, p0 = len, r0	/* is len == 0 ? */
+	adds	len = -1, len		/* --len; */
 (p6)	br.cond.spnt	.restore_and_exit ;;
 	mov	ar.lc = len
.l4:
@@ -163,14 +163,14 @@ ENTRY(memccpy)
 .foundit:
 (p6)	mov	ret0 = dest
 .restore_and_exit:
-	mov     pr = saved_pr, -1    	// restore the predicate registers
-	mov 	ar.lc = saved_lc	// restore the loop counter
-	mov 	ar.ec = saved_ec ;;	// restore the epilog counter
+	mov     pr = saved_pr, -1    	/* restore the predicate registers */
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	mov 	ar.ec = saved_ec ;;	/* restore the epilog counter */
 	br.ret.sptk.many b0
 .gotit:
 	.pred.rel "mutex" p6, p7
-(p6)	mov	value = val[3]		// if coming from l2
-(p7)	mov	value = r[MEMLAT+2]	// if coming from l3
+(p6)	mov	value = val[3]		/* if coming from l2 */
+(p7)	mov	value = r[MEMLAT+2]	/* if coming from l3 */
 	mov	ar.lc = pos0[1] ;;
 .l5:
 	extr.u	tmp = value, 0, 8 ;;

+ 16 - 16
libc/string/ia64/memchr.S

@@ -62,18 +62,18 @@ ENTRY(__memchr)
 	.rotr	value[MEMLAT+1], addr[MEMLAT+3], aux[2], poschr[2]
 	.rotp	p[MEMLAT+3]
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc 	// save the loop counter
+        mov 	saved_lc = ar.lc 	/* save the loop counter */
 	.save pr, saved_pr
-	mov	saved_pr = pr		// save the predicates
+	mov	saved_pr = pr		/* save the predicates */
 	.body
 	mov 	ret0 = str
-	and 	tmp = 7, str		// tmp = str % 8
-	cmp.ne	p7, p0 = r0, r0		// clear p7
-	extr.u	chr = in1, 0, 8		// chr = (unsigned char) in1
+	and 	tmp = 7, str		/* tmp = str % 8 */
+	cmp.ne	p7, p0 = r0, r0		/* clear p7 */
+	extr.u	chr = in1, 0, 8		/* chr = (unsigned char) in1 */
 	mov	len = in2
-	cmp.gtu	p6, p0 = 16, in2	// use a simple loop for short
-(p6)	br.cond.spnt .srchfew ;;	// searches
-	sub	loopcnt = 8, tmp	// loopcnt = 8 - tmp
+	cmp.gtu	p6, p0 = 16, in2	/* use a simple loop for short */
+(p6)	br.cond.spnt .srchfew ;;	/* searches */
+	sub	loopcnt = 8, tmp	/* loopcnt = 8 - tmp */
 	cmp.eq	p6, p0 = tmp, r0
 (p6)	br.cond.sptk	.str_aligned;;
 	sub	len = len, loopcnt
@@ -86,12 +86,12 @@ ENTRY(__memchr)
 (p6)	br.cond.spnt	.foundit
 	br.cloop.sptk	.l1 ;;
 .str_aligned:
-	cmp.ne	p6, p0 = r0, r0		// clear p6
-	shr.u	loopcnt = len, 3	// loopcnt = len / 8
-	and 	len = 7, len ;;		// remaining len = len & 7
+	cmp.ne	p6, p0 = r0, r0		/* clear p6 */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and 	len = 7, len ;;		/* remaining len = len & 7 */
 	adds	loopcnt = -1, loopcnt
 	mov	ar.ec = MEMLAT + 3
-	mux1	chrx8 = chr, @brcst ;;	// get a word full of chr
+	mux1	chrx8 = chr, @brcst ;;	/* get a word full of chr */
 	mov	ar.lc = loopcnt
 	mov	pr.rot = 1 << 16 ;;
 .l2:
@@ -114,12 +114,12 @@ ENTRY(__memchr)
 (p6)	br.cond.dpnt	.foundit
 	br.cloop.sptk	.l3 ;;
 .notfound:
-	cmp.ne	p6, p0 = r0, r0	// clear p6 (p7 was already 0 when we got here)
-	mov	ret0 = r0 ;;	// return NULL
+	cmp.ne	p6, p0 = r0, r0	/* clear p6 (p7 was already 0 when we got here) */
+	mov	ret0 = r0 ;;	/* return NULL */
 .foundit:
 	.pred.rel "mutex" p6, p7
-(p6)	adds	ret0 = -1, ret0 		   // if we got here from l1 or l3
-(p7)	add	ret0 = addr[MEMLAT+2], poschr[1]   // if we got here from l2
+(p6)	adds	ret0 = -1, ret0 		   /* if we got here from l1 or l3 */
+(p7)	add	ret0 = addr[MEMLAT+2], poschr[1]   /* if we got here from l2 */
 	mov	pr = saved_pr, -1
 	mov	ar.lc = saved_lc
 	br.ret.sptk.many b0

+ 47 - 47
libc/string/ia64/memcmp.S

@@ -28,7 +28,7 @@
    In this form, it assumes little endian mode.  For big endian mode, the
    the two shifts in .l2 must be inverted:
 
-	shl   	tmp1[0] = r[1 + MEMLAT], sh1   // tmp1 = w0 << sh1
+	shl	tmp1[0] = r[1 + MEMLAT], sh1   // tmp1 = w0 << sh1
 	shr.u   tmp2[0] = r[0 + MEMLAT], sh2   // tmp2 = w1 >> sh2
 
    and all the mux1 instructions should be replaced by plain mov's.  */
@@ -36,8 +36,8 @@
 #include "sysdep.h"
 #undef ret
 
-#define OP_T_THRES 	16
-#define OPSIZ 		8
+#define OP_T_THRES	16
+#define OPSIZ		8
 #define MEMLAT		2
 
 #define start		r15
@@ -56,85 +56,85 @@
 
 ENTRY(memcmp)
 	.prologue
-	alloc 	r2 = ar.pfs, 3, 37, 0, 40
+	alloc	r2 = ar.pfs, 3, 37, 0, 40
 
 	.rotr	r[MEMLAT + 2], q[MEMLAT + 5], tmp1[4], tmp2[4], val[2]
 	.rotp	p[MEMLAT + 4 + 1]
 
-	mov	ret0 = r0		// by default return value = 0
+	mov	ret0 = r0		/* by default return value = 0 */
 	.save pr, saved_pr
-	mov	saved_pr = pr		// save the predicate registers
+	mov	saved_pr = pr		/* save the predicate registers */
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc	// save the loop counter
+        mov	saved_lc = ar.lc	/* save the loop counter */
 	.body
-	mov 	dest = in0		// dest
-	mov 	src = in1		// src
-	mov	len = in2		// len
-	sub	tmp = r0, in0		// tmp = -dest
+	mov	dest = in0		/* dest */
+	mov	src = in1		/* src */
+	mov	len = in2		/* len */
+	sub	tmp = r0, in0		/* tmp = -dest */
 	;;
-	and	loopcnt = 7, tmp		// loopcnt = -dest % 8
-	cmp.ge	p6, p0 = OP_T_THRES, len	// is len <= OP_T_THRES
-(p6)	br.cond.spnt	.cmpfew			// compare byte by byte
+	and	loopcnt = 7, tmp		/* loopcnt = -dest % 8 */
+	cmp.ge	p6, p0 = OP_T_THRES, len	/* is len <= OP_T_THRES */
+(p6)	br.cond.spnt	.cmpfew			/* compare byte by byte */
 	;;
 	cmp.eq	p6, p0 = loopcnt, r0
 (p6)	br.cond.sptk .dest_aligned
-	sub	len = len, loopcnt	// len -= -dest % 8
-	adds	loopcnt = -1, loopcnt	// --loopcnt
+	sub	len = len, loopcnt	/* len -= -dest % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
 	;;
 	mov	ar.lc = loopcnt
-.l1:					// copy -dest % 8 bytes
-	ld1	value1 = [src], 1	// value = *src++
+.l1:					/* copy -dest % 8 bytes */
+	ld1	value1 = [src], 1	/* value = *src++ */
 	ld1	value2 = [dest], 1
 	;;
 	cmp.ne	p6, p0 = value1, value2
 (p6)	br.cond.spnt .done
 	br.cloop.dptk .l1
 .dest_aligned:
-	and	sh1 = 7, src 		// sh1 = src % 8
-	and	tmp = -8, len   	// tmp = len & -OPSIZ
-	and	asrc = -8, src		// asrc = src & -OPSIZ  -- align src
-	shr.u	loopcnt = len, 3	// loopcnt = len / 8
-	and	len = 7, len ;;		// len = len % 8
-	shl	sh1 = sh1, 3		// sh1 = 8 * (src % 8)
-	adds	loopcnt = -1, loopcnt	// --loopcnt
-	mov     pr.rot = 1 << 16 ;;	// set rotating predicates
-	sub	sh2 = 64, sh1		// sh2 = 64 - sh1
-	mov	ar.lc = loopcnt		// set LC
-	cmp.eq  p6, p0 = sh1, r0 	// is the src aligned?
+	and	sh1 = 7, src		/* sh1 = src % 8 */
+	and	tmp = -8, len		/* tmp = len & -OPSIZ */
+	and	asrc = -8, src		/* asrc = src & -OPSIZ  -- align src */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and	len = 7, len ;;		/* len = len % 8 */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	mov     pr.rot = 1 << 16 ;;	/* set rotating predicates */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	mov	ar.lc = loopcnt		/* set LC */
+	cmp.eq  p6, p0 = sh1, r0	/* is the src aligned? */
 (p6)    br.cond.sptk .src_aligned
-	add	src = src, tmp		// src += len & -OPSIZ
-	mov	ar.ec = MEMLAT + 4 + 1 	// four more passes needed
-	ld8	r[1] = [asrc], 8 ;;	// r[1] = w0
+	add	src = src, tmp		/* src += len & -OPSIZ */
+	mov	ar.ec = MEMLAT + 4 + 1	/* four more passes needed */
+	ld8	r[1] = [asrc], 8 ;;	/* r[1] = w0 */
 	.align	32
 
-// We enter this loop with p6 cleared by the above comparison
+/* We enter this loop with p6 cleared by the above comparison */
 
 .l2:
-(p[0])		ld8	r[0] = [asrc], 8		// r[0] = w1
+(p[0])		ld8	r[0] = [asrc], 8		/* r[0] = w1 */
 (p[0])		ld8	q[0] = [dest], 8
-(p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	// tmp1 = w0 >> sh1
-(p[MEMLAT])	shl	tmp2[0] = r[0 + MEMLAT], sh2  	// tmp2 = w1 << sh2
+(p[MEMLAT])	shr.u	tmp1[0] = r[1 + MEMLAT], sh1	/* tmp1 = w0 >> sh1 */
+(p[MEMLAT])	shl	tmp2[0] = r[0 + MEMLAT], sh2	/* tmp2 = w1 << sh2 */
 (p[MEMLAT+4])	cmp.ne	p6, p0 = q[MEMLAT + 4], val[1]
-(p[MEMLAT+3])	or	val[0] = tmp1[3], tmp2[3] 	// val = tmp1 | tmp2
+(p[MEMLAT+3])	or	val[0] = tmp1[3], tmp2[3]	/* val = tmp1 | tmp2 */
 (p6)		br.cond.spnt .l2exit
 		br.ctop.sptk    .l2
 		br.cond.sptk .cmpfew
 .l3exit:
 	mux1	value1 = r[MEMLAT], @rev
 	mux1	value2 = q[MEMLAT], @rev
-	cmp.ne	p6, p0 = r0, r0	;;	// clear p6
+	cmp.ne	p6, p0 = r0, r0	;;	/* clear p6 */
 .l2exit:
 (p6)	mux1	value1 = val[1], @rev
 (p6)	mux1	value2 = q[MEMLAT + 4], @rev ;;
 	cmp.ltu	p6, p7 = value2, value1 ;;
 (p6)	mov	ret0 = -1
 (p7)	mov	ret0 = 1
-	mov     pr = saved_pr, -1    	// restore the predicate registers
-	mov 	ar.lc = saved_lc	// restore the loop counter
+	mov     pr = saved_pr, -1	/* restore the predicate registers */
+	mov	ar.lc = saved_lc	/* restore the loop counter */
 	br.ret.sptk.many b0
 .src_aligned:
-	cmp.ne	p6, p0 = r0, r0		// clear p6
-	mov     ar.ec = MEMLAT + 1 ;;	// set EC
+	cmp.ne	p6, p0 = r0, r0		/* clear p6 */
+	mov     ar.ec = MEMLAT + 1 ;;	/* set EC */
 .l3:
 (p[0])		ld8	r[0] = [src], 8
 (p[0])		ld8	q[0] = [dest], 8
@@ -142,8 +142,8 @@ ENTRY(memcmp)
 (p6)		br.cond.spnt .l3exit
 		br.ctop.dptk .l3 ;;
 .cmpfew:
-	cmp.eq	p6, p0 = len, r0	// is len == 0 ?
-	adds	len = -1, len		// --len;
+	cmp.eq	p6, p0 = len, r0	/* is len == 0 ? */
+	adds	len = -1, len		/* --len; */
 (p6)	br.cond.spnt	.restore_and_exit ;;
 	mov	ar.lc = len
 .l4:
@@ -154,10 +154,10 @@ ENTRY(memcmp)
 (p6)	br.cond.spnt	.done
 	br.cloop.dptk	.l4 ;;
 .done:
-(p6)	sub	ret0 = value2, value1	// don't execute it if falling thru
+(p6)	sub	ret0 = value2, value1	/* don't execute it if falling thru */
 .restore_and_exit:
-	mov     pr = saved_pr, -1    	// restore the predicate registers
-	mov 	ar.lc = saved_lc	// restore the loop counter
+	mov     pr = saved_pr, -1	/* restore the predicate registers */
+	mov	ar.lc = saved_lc	/* restore the loop counter */
 	br.ret.sptk.many b0
 END(memcmp)
 libc_hidden_def (memcmp)

+ 80 - 80
libc/string/ia64/memcpy.S

@@ -42,8 +42,8 @@
 
 #define LFETCH_DIST     500
 
-#define ALIGN_UNROLL_no   4 // no. of elements
-#define ALIGN_UNROLL_sh	  2 // (shift amount)
+#define ALIGN_UNROLL_no   4 /* no. of elements */
+#define ALIGN_UNROLL_sh	  2 /* (shift amount) */
 
 #define MEMLAT	8
 #define Nrot	((4*(MEMLAT+2) + 7) & ~7)
@@ -168,76 +168,76 @@ ENTRY(memcpy)
 	.rotr	r[MEMLAT+1], s[MEMLAT+2], q[MEMLAT+1], t[MEMLAT+1]
 	.rotp	p[MEMLAT+2]
 	.rotf	fr[MEMLAT+1], fq[MEMLAT+1], fs[MEMLAT+1], ft[MEMLAT+1]
-	mov	ret0 = in0		// return tmp2 = dest
+	mov	ret0 = in0		/* return tmp2 = dest */
 	.save   pr, saved_pr
-	movi0	saved_pr = pr		// save the predicate registers
+	movi0	saved_pr = pr		/* save the predicate registers */
 } { .mmi
-	and	tmp4 = 7, in0 		// check if destination is aligned
-	mov 	dest = in0		// dest
-	mov 	src = in1		// src
+	and	tmp4 = 7, in0 		/* check if destination is aligned */
+	mov 	dest = in0		/* dest */
+	mov 	src = in1		/* src */
 ;; }
 { .mii
-	cmp.eq	p_scr, p0 = in2, r0	// if (len == 0)
+	cmp.eq	p_scr, p0 = in2, r0	/* if (len == 0) */
 	.save   ar.lc, saved_lc
-        movi0 	saved_lc = ar.lc	// save the loop counter
+        movi0 	saved_lc = ar.lc	/* save the loop counter */
 	.body
-	cmp.ge	p_few, p0 = OP_T_THRES, in2 // is len <= OP_T_THRESH
+	cmp.ge	p_few, p0 = OP_T_THRES, in2 /* is len <= OP_T_THRESH */
 } { .mbb
-	mov	len = in2		// len
-(p_scr)	br.cond.dpnt.few .restore_and_exit // 	Branch no. 1: return dest
-(p_few) br.cond.dpnt.many .copy_bytes	// Branch no. 2: copy byte by byte
+	mov	len = in2		/* len */
+(p_scr)	br.cond.dpnt.few .restore_and_exit /* 	Branch no. 1: return dest */
+(p_few) br.cond.dpnt.many .copy_bytes	/* Branch no. 2: copy byte by byte */
 ;; }
 { .mmi
 #if defined(USE_LFETCH)
-	lfetch.nt1 [dest]		//
-	lfetch.nt1 [src]		//
+	lfetch.nt1 [dest]		/* */
+	lfetch.nt1 [src]		/* */
 #endif
-	shr.u	elemcnt = len, 3	// elemcnt = len / 8
+	shr.u	elemcnt = len, 3	/* elemcnt = len / 8 */
 } { .mib
-	cmp.eq	p_scr, p0 = tmp4, r0	// is destination aligned?
-	sub	loopcnt = 7, tmp4	//
+	cmp.eq	p_scr, p0 = tmp4, r0	/* is destination aligned? */
+	sub	loopcnt = 7, tmp4	/* */
 (p_scr) br.cond.dptk.many .dest_aligned
 ;; }
 { .mmi
-	ld1	tmp2 = [src], 1		//
-	sub	len = len, loopcnt, 1	// reduce len
-	movi0	ar.lc = loopcnt		//
+	ld1	tmp2 = [src], 1		/* */
+	sub	len = len, loopcnt, 1	/* reduce len */
+	movi0	ar.lc = loopcnt		/* */
 } { .mib
-	cmp.ne  p_scr, p0 = 0, loopcnt	// avoid loading beyond end-point
+	cmp.ne  p_scr, p0 = 0, loopcnt	/* avoid loading beyond end-point */
 ;; }
 
-.l0:	// ---------------------------- // L0: Align src on 8-byte boundary
+.l0:	/* ---------------------------- L0: Align src on 8-byte boundary */
 { .mmi
-	st1	[dest] = tmp2, 1	//
-(p_scr)	ld1	tmp2 = [src], 1		//
+	st1	[dest] = tmp2, 1	/* */
+(p_scr)	ld1	tmp2 = [src], 1		/* */
 } { .mib
-	cmp.lt	p_scr, p0 = 1, loopcnt	// avoid load beyond end-point
+	cmp.lt	p_scr, p0 = 1, loopcnt	/* avoid load beyond end-point */
 	add	loopcnt = -1, loopcnt
-	br.cloop.dptk.few .l0		//
+	br.cloop.dptk.few .l0		/* */
 ;; }
 
 .dest_aligned:
 { .mmi
-	and	tmp4 = 7, src		// ready for alignment check
-	shr.u	elemcnt = len, 3	// elemcnt = len / 8
+	and	tmp4 = 7, src		/* ready for alignment check */
+	shr.u	elemcnt = len, 3	/* elemcnt = len / 8 */
 ;; }
 { .mib
-	cmp.ne	p_scr, p0 = tmp4, r0	// is source also aligned
-	tbit.nz p_xtr, p_nxtr = src, 3	// prepare a separate move if src
-} { .mib				// is not 16B aligned
-	add	ptr2 = LFETCH_DIST, dest	// prefetch address
+	cmp.ne	p_scr, p0 = tmp4, r0	/* is source also aligned */
+	tbit.nz p_xtr, p_nxtr = src, 3	/* prepare a separate move if src */
+} { .mib				/* is not 16B aligned */
+	add	ptr2 = LFETCH_DIST, dest	/* prefetch address */
 	add	ptr1 = LFETCH_DIST, src
 (p_scr) br.cond.dptk.many .src_not_aligned
 ;; }
 
-// The optimal case, when dest, and src are aligned
+/* The optimal case, when dest, and src are aligned */
 
 .both_aligned:
 { .mmi
 	.pred.rel "mutex",p_xtr,p_nxtr
-(p_xtr)	cmp.gt  p_scr, p0 = ALIGN_UNROLL_no+1, elemcnt // Need N + 1 to qualify
-(p_nxtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no, elemcnt  // Need only N to qualify
-	movi0	pr.rot = 1 << 16	// set rotating predicates
+(p_xtr)	cmp.gt  p_scr, p0 = ALIGN_UNROLL_no+1, elemcnt /* Need N + 1 to qualify */
+(p_nxtr) cmp.gt p_scr, p0 = ALIGN_UNROLL_no, elemcnt  /* Need only N to qualify */
+	movi0	pr.rot = 1 << 16	/* set rotating predicates */
 } { .mib
 (p_scr) br.cond.dpnt.many .copy_full_words
 ;; }
@@ -245,21 +245,21 @@ ENTRY(memcpy)
 { .mmi
 (p_xtr)	load	tempreg = [src], 8
 (p_xtr) add 	elemcnt = -1, elemcnt
-	movi0	ar.ec = MEMLAT + 1	// set the epilog counter
+	movi0	ar.ec = MEMLAT + 1	/* set the epilog counter */
 ;; }
 { .mmi
-(p_xtr) add	len = -8, len		//
-	add 	asrc = 16, src 		// one bank apart (for USE_INT)
-	shr.u	loopcnt = elemcnt, ALIGN_UNROLL_sh  // cater for unrolling
+(p_xtr) add	len = -8, len		/* */
+	add 	asrc = 16, src 		/* one bank apart (for USE_INT) */
+	shr.u	loopcnt = elemcnt, ALIGN_UNROLL_sh  /* cater for unrolling */
 ;;}
 { .mmi
 	add	loopcnt = -1, loopcnt
-(p_xtr)	store	[dest] = tempreg, 8	// copy the "extra" word
+(p_xtr)	store	[dest] = tempreg, 8	/* copy the "extra" word */
 	nop.i	0
 ;; }
 { .mib
 	add	adest = 16, dest
-	movi0	ar.lc = loopcnt 	// set the loop counter
+	movi0	ar.lc = loopcnt 	/* set the loop counter */
 ;; }
 
 #ifdef  GAS_ALIGN_BREAKS_UNWIND_INFO
@@ -268,7 +268,7 @@ ENTRY(memcpy)
 	.align	32
 #endif
 #if defined(USE_FLP)
-.l1: // ------------------------------- // L1: Everything a multiple of 8
+.l1: /* ------------------------------- L1: Everything a multiple of 8 */
 { .mmi
 #if defined(USE_LFETCH)
 (p[0])	lfetch.nt1 [ptr2],32
@@ -290,7 +290,7 @@ ENTRY(memcpy)
 	br.ctop.dptk.many .l1
 ;; }
 #elif defined(USE_INT)
-.l1: // ------------------------------- // L1: Everything a multiple of 8
+.l1: /* ------------------------------- L1: Everything a multiple of 8 */
 { .mmi
 (p[0])	load	the_r[0] = [src], 8
 (p[0])	load	the_q[0] = [asrc], 8
@@ -317,58 +317,58 @@ ENTRY(memcpy)
 
 .copy_full_words:
 { .mib
-	cmp.gt	p_scr, p0 = 8, len	//
-	shr.u	elemcnt = len, 3	//
+	cmp.gt	p_scr, p0 = 8, len	/* */
+	shr.u	elemcnt = len, 3	/* */
 (p_scr) br.cond.dpnt.many .copy_bytes
 ;; }
 { .mii
 	load	tempreg = [src], 8
-	add	loopcnt = -1, elemcnt	//
+	add	loopcnt = -1, elemcnt	/* */
 ;; }
 { .mii
-	cmp.ne	p_scr, p0 = 0, loopcnt	//
-	mov	ar.lc = loopcnt		//
+	cmp.ne	p_scr, p0 = 0, loopcnt	/* */
+	mov	ar.lc = loopcnt		/* */
 ;; }
 
-.l2: // ------------------------------- // L2: Max 4 words copied separately
+.l2: /* ------------------------------- L2: Max 4 words copied separately */
 { .mmi
 	store	[dest] = tempreg, 8
-(p_scr)	load	tempreg = [src], 8	//
+(p_scr)	load	tempreg = [src], 8	/* */
 	add	len = -8, len
 } { .mib
-	cmp.lt	p_scr, p0 = 1, loopcnt	// avoid load beyond end-point
+	cmp.lt	p_scr, p0 = 1, loopcnt	/* avoid load beyond end-point */
 	add	loopcnt = -1, loopcnt
 	br.cloop.dptk.few  .l2
 ;; }
 
 .copy_bytes:
 { .mib
-	cmp.eq	p_scr, p0 = len, r0	// is len == 0 ?
-	add	loopcnt = -1, len	// len--;
+	cmp.eq	p_scr, p0 = len, r0	/* is len == 0 ? */
+	add	loopcnt = -1, len	/* len--; */
 (p_scr)	br.cond.spnt	.restore_and_exit
 ;; }
 { .mii
 	ld1	tmp2 = [src], 1
 	movi0	ar.lc = loopcnt
-	cmp.ne	p_scr, p0 = 0, loopcnt	// avoid load beyond end-point
+	cmp.ne	p_scr, p0 = 0, loopcnt	/* avoid load beyond end-point */
 ;; }
 
-.l3: // ------------------------------- // L3: Final byte move
+.l3: /* ------------------------------- L3: Final byte move */
 { .mmi
 	st1	[dest] = tmp2, 1
 (p_scr)	ld1	tmp2 = [src], 1
 } { .mib
-	cmp.lt	p_scr, p0 = 1, loopcnt	// avoid load beyond end-point
+	cmp.lt	p_scr, p0 = 1, loopcnt	/* avoid load beyond end-point */
 	add	loopcnt = -1, loopcnt
 	br.cloop.dptk.few  .l3
 ;; }
 
 .restore_and_exit:
 { .mmi
-	movi0	pr = saved_pr, -1	// restore the predicate registers
+	movi0	pr = saved_pr, -1	/* restore the predicate registers */
 ;; }
 { .mib
-	movi0	ar.lc = saved_lc	// restore the loop counter
+	movi0	ar.lc = saved_lc	/* restore the loop counter */
 	br.ret.sptk.many b0
 ;; }
 
@@ -376,41 +376,41 @@ ENTRY(memcpy)
 .src_not_aligned:
 { .mmi
 	cmp.gt	p_scr, p0 = 16, len
-	and	sh1 = 7, src 		// sh1 = src % 8
-	shr.u	loopcnt = len, 4	// element-cnt = len / 16
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	shr.u	loopcnt = len, 4	/* element-cnt = len / 16 */
 } { .mib
 	add	tmp4 = @ltoff(.table), gp
 	add 	tmp3 = @ltoff(.loop56), gp
-(p_scr)	br.cond.dpnt.many .copy_bytes	// do byte by byte if too few
+(p_scr)	br.cond.dpnt.many .copy_bytes	/* do byte by byte if too few */
 ;; }
 { .mmi
-	and	asrc = -8, src		// asrc = (-8) -- align src for loop
-	add 	loopcnt = -1, loopcnt	// loopcnt--
-	shl	sh1 = sh1, 3		// sh1 = 8 * (src % 8)
+	and	asrc = -8, src		/* asrc = (-8) -- align src for loop */
+	add 	loopcnt = -1, loopcnt	/* loopcnt-- */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
 } { .mmi
-	ld8	ptable = [tmp4]		// ptable = &table
-	ld8	ploop56 = [tmp3]	// ploop56 = &loop56
-	and	tmp2 = -16, len		// tmp2 = len & -OPSIZ
+	ld8	ptable = [tmp4]		/* ptable = &table */
+	ld8	ploop56 = [tmp3]	/* ploop56 = &loop56 */
+	and	tmp2 = -16, len		/* tmp2 = len & -OPSIZ */
 ;; }
 { .mmi
-	add	tmp3 = ptable, sh1	// tmp3 = &table + sh1
-	add	src = src, tmp2		// src += len & (-16)
-	movi0	ar.lc = loopcnt		// set LC
+	add	tmp3 = ptable, sh1	/* tmp3 = &table + sh1 */
+	add	src = src, tmp2		/* src += len & (-16) */
+	movi0	ar.lc = loopcnt		/* set LC */
 ;; }
 { .mmi
-	ld8	tmp4 = [tmp3]		// tmp4 = loop offset
-	sub	len = len, tmp2		// len -= len & (-16)
-	movi0	ar.ec = MEMLAT + 2 	// one more pass needed
+	ld8	tmp4 = [tmp3]		/* tmp4 = loop offset */
+	sub	len = len, tmp2		/* len -= len & (-16) */
+	movi0	ar.ec = MEMLAT + 2 	/* one more pass needed */
 ;; }
 { .mmi
-	ld8	s[1] = [asrc], 8	// preload
-	sub	loopaddr = ploop56,tmp4	// loopadd = &loop56 - loop offset
-	movi0   pr.rot = 1 << 16	// set rotating predicates
+	ld8	s[1] = [asrc], 8	/* preload */
+	sub	loopaddr = ploop56,tmp4	/* loopadd = &loop56 - loop offset */
+	movi0   pr.rot = 1 << 16	/* set rotating predicates */
;; }
 { .mib
 	nop.m	0
 	movi0	b6 = loopaddr
-	br	b6			// jump to the appropriate loop
+	br	b6			/* jump to the appropriate loop */
 ;; }
 
 	LOOP(8)
@@ -426,7 +426,7 @@ libc_hidden_def (memcpy)
 	.rodata
 	.align 8
 .table:
-	data8	0			// dummy entry
+	data8	0			/* dummy entry */
 	data8 	.loop56 - .loop8
 	data8 	.loop56 - .loop16
 	data8 	.loop56 - .loop24

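The comments in the hunk above describe memcpy's unaligned-source strategy: align src down to an 8-byte boundary, then rebuild each output word from two adjacent aligned words, shifting by sh1 = 8 * (src % 8) bits and its complement sh2. A C sketch of that shift-and-merge idea (illustrative only, not part of this commit; the function name and types are invented, and it assumes a little-endian 64-bit machine and a genuinely misaligned src, since the assembly branches elsewhere when sh1 == 0):

/* Illustrative shift-and-merge copy; little endian, sh1 != 0. */
void copy_misaligned (unsigned long *dst, const unsigned char *src,
                      unsigned long nwords)
{
	const unsigned long *asrc =
	    (const unsigned long *) ((unsigned long) src & -8UL); /* align src */
	unsigned int sh1 = 8 * ((unsigned long) src % 8); /* bits to discard */
	unsigned int sh2 = 64 - sh1;                      /* bits to keep */
	unsigned long w0 = *asrc++;                       /* preload */

	while (nwords-- != 0) {
		unsigned long w1 = *asrc++;
		*dst++ = (w0 >> sh1) | (w1 << sh2); /* merge two source words */
		w0 = w1;
	}
}

The .table/.loop56 machinery above amounts, roughly, to picking one of eight pre-specialized copies of this loop, one per possible sh1, instead of keeping the shift amount live in the loop.
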
+ 85 - 85
libc/string/ia64/memmove.S

@@ -81,48 +81,48 @@ ENTRY(memmove)
 	alloc 	r2 = ar.pfs, 3, Nrot - 3, 0, Nrot
 	.rotr	r[MEMLAT + 2], q[MEMLAT + 1]
 	.rotp	p[MEMLAT + 2]
-	mov	ret0 = in0		// return value = dest
+	mov	ret0 = in0		/* return value = dest */
 	.save pr, saved_pr
-	mov	saved_pr = pr		// save the predicate registers
+	mov	saved_pr = pr		/* save the predicate registers */
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc	// save the loop counter
+        mov 	saved_lc = ar.lc	/* save the loop counter */
 	.body
-	or	tmp3 = in0, in1 ;;	// tmp3 = dest | src
-	or	tmp3 = tmp3, in2	// tmp3 = dest | src | len
-	mov 	dest = in0		// dest
-	mov 	src = in1		// src
-	mov	len = in2		// len
-	sub	tmp2 = r0, in0		// tmp2 = -dest
-	cmp.eq	p6, p0 = in2, r0	// if (len == 0)
-(p6)	br.cond.spnt .restore_and_exit;;// 	return dest;
-	and	tmp4 = 7, tmp3 		// tmp4 = (dest | src | len) & 7
-	cmp.le	p6, p0 = dest, src	// if dest <= src it's always safe
-(p6)	br.cond.spnt .forward		// to copy forward
+	or	tmp3 = in0, in1 ;;	/* tmp3 = dest | src */
+	or	tmp3 = tmp3, in2	/* tmp3 = dest | src | len */
+	mov 	dest = in0		/* dest */
+	mov 	src = in1		/* src */
+	mov	len = in2		/* len */
+	sub	tmp2 = r0, in0		/* tmp2 = -dest */
+	cmp.eq	p6, p0 = in2, r0	/* if (len == 0) */
+(p6)	br.cond.spnt .restore_and_exit;;/* 	return dest; */
+	and	tmp4 = 7, tmp3 		/* tmp4 = (dest | src | len) & 7 */
+	cmp.le	p6, p0 = dest, src	/* if dest <= src it's always safe */
+(p6)	br.cond.spnt .forward		/* to copy forward */
 	add	tmp3 = src, len;;
-	cmp.lt	p6, p0 = dest, tmp3	// if dest > src && dest < src + len
-(p6)	br.cond.spnt .backward		// we have to copy backward
+	cmp.lt	p6, p0 = dest, tmp3	/* if dest > src && dest < src + len */
+(p6)	br.cond.spnt .backward		/* we have to copy backward */

 .forward:
-	shr.u	loopcnt = len, 4 ;;	// loopcnt = len / 16
-	cmp.ne	p6, p0 = tmp4, r0	// if ((dest | src | len) & 7 != 0)
-(p6)	br.cond.sptk .next		//	goto next;
+	shr.u	loopcnt = len, 4 ;;	/* loopcnt = len / 16 */
+	cmp.ne	p6, p0 = tmp4, r0	/* if ((dest | src | len) & 7 != 0) */
+(p6)	br.cond.sptk .next		/*	goto next; */

-// The optimal case, when dest, src and len are all multiples of 8
+/* The optimal case, when dest, src and len are all multiples of 8 */

 	and	tmp3 = 0xf, len
-	mov	pr.rot = 1 << 16	// set rotating predicates
-	mov	ar.ec = MEMLAT + 1 ;;	// set the epilog counter
-	cmp.ne	p6, p0 = tmp3, r0	// do we have to copy an extra word?
-	adds	loopcnt = -1, loopcnt;;	// --loopcnt
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	mov	ar.ec = MEMLAT + 1 ;;	/* set the epilog counter */
+	cmp.ne	p6, p0 = tmp3, r0	/* do we have to copy an extra word? */
+	adds	loopcnt = -1, loopcnt;;	/* --loopcnt */
 (p6)	ld8	value = [src], 8;;
-(p6)	st8	[dest] = value, 8	// copy the "odd" word
-	mov	ar.lc = loopcnt 	// set the loop counter
+(p6)	st8	[dest] = value, 8	/* copy the "odd" word */
+	mov	ar.lc = loopcnt 	/* set the loop counter */
 	cmp.eq	p6, p0 = 8, len
-(p6)	br.cond.spnt .restore_and_exit;;// the one-word special case
-	adds	adest = 8, dest		// set adest one word ahead of dest
-	adds	asrc = 8, src ;;	// set asrc one word ahead of src
-	nop.b	0			// get the "golden" alignment for
-	nop.b	0			// the next loop
+(p6)	br.cond.spnt .restore_and_exit;;/* the one-word special case */
+	adds	adest = 8, dest		/* set adest one word ahead of dest */
+	adds	asrc = 8, src ;;	/* set asrc one word ahead of src */
+	nop.b	0			/* get the "golden" alignment for */
+	nop.b	0			/* the next loop */
 .l0:
 (p[0])		ld8	r[0] = [src], 16
 (p[0])		ld8	q[0] = [asrc], 16
@@ -130,50 +130,50 @@ ENTRY(memmove)
 (p[MEMLAT])	st8	[adest] = q[MEMLAT], 16
 		br.ctop.dptk .l0 ;;

-	mov	pr = saved_pr, -1	// restore the predicate registers
-	mov	ar.lc = saved_lc	// restore the loop counter
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
+	mov	ar.lc = saved_lc	/* restore the loop counter */
 	br.ret.sptk.many b0
 .next:
-	cmp.ge	p6, p0 = OP_T_THRES, len	// is len <= OP_T_THRES
-	and	loopcnt = 7, tmp2 		// loopcnt = -dest % 8
-(p6)	br.cond.spnt	.cpyfew			// copy byte by byte
+	cmp.ge	p6, p0 = OP_T_THRES, len	/* is len <= OP_T_THRES */
+	and	loopcnt = 7, tmp2 		/* loopcnt = -dest % 8 */
+(p6)	br.cond.spnt	.cpyfew			/* copy byte by byte */
 	;;
 	cmp.eq	p6, p0 = loopcnt, r0
 (p6)	br.cond.sptk	.dest_aligned
-	sub	len = len, loopcnt	// len -= -dest % 8
-	adds	loopcnt = -1, loopcnt	// --loopcnt
+	sub	len = len, loopcnt	/* len -= -dest % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
 	;;
 	mov	ar.lc = loopcnt
-.l1:					// copy -dest % 8 bytes
-	ld1	value = [src], 1	// value = *src++
+.l1:					/* copy -dest % 8 bytes */
+	ld1	value = [src], 1	/* value = *src++ */
 	;;
-	st1	[dest] = value, 1	// *dest++ = value
+	st1	[dest] = value, 1	/* *dest++ = value */
 	br.cloop.dptk .l1
 .dest_aligned:
-	and	sh1 = 7, src 		// sh1 = src % 8
-	and	tmp2 = -8, len   	// tmp2 = len & -OPSIZ
-	and	asrc = -8, src		// asrc = src & -OPSIZ  -- align src
-	shr.u	loopcnt = len, 3	// loopcnt = len / 8
-	and	len = 7, len;;		// len = len % 8
-	adds	loopcnt = -1, loopcnt	// --loopcnt
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	and	tmp2 = -8, len   	/* tmp2 = len & -OPSIZ */
+	and	asrc = -8, src		/* asrc = src & -OPSIZ  -- align src */
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	and	len = 7, len;;		/* len = len % 8 */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
 	addl	tmp4 = @ltoff(.table), gp
 	addl	tmp3 = @ltoff(.loop56), gp
-	mov     ar.ec = MEMLAT + 1	// set EC
-	mov     pr.rot = 1 << 16;;	// set rotating predicates
-	mov	ar.lc = loopcnt		// set LC
-	cmp.eq  p6, p0 = sh1, r0 	// is the src aligned?
+	mov     ar.ec = MEMLAT + 1	/* set EC */
+	mov     pr.rot = 1 << 16;;	/* set rotating predicates */
+	mov	ar.lc = loopcnt		/* set LC */
+	cmp.eq  p6, p0 = sh1, r0 	/* is the src aligned? */
 (p6)    br.cond.sptk .src_aligned
-	add	src = src, tmp2		// src += len & -OPSIZ
-	shl	sh1 = sh1, 3		// sh1 = 8 * (src % 8)
-	ld8	ploop56 = [tmp3]	// ploop56 = &loop56
-	ld8	ptable = [tmp4];;	// ptable = &table
-	add	tmp3 = ptable, sh1;;	// tmp3 = &table + sh1
-	mov	ar.ec = MEMLAT + 1 + 1 // one more pass needed
-	ld8	tmp4 = [tmp3];;		// tmp4 = loop offset
-	sub	loopaddr = ploop56,tmp4	// loopadd = &loop56 - loop offset
-	ld8	r[1] = [asrc], 8;;	// w0
+	add	src = src, tmp2		/* src += len & -OPSIZ */
+	shl	sh1 = sh1, 3		/* sh1 = 8 * (src % 8) */
+	ld8	ploop56 = [tmp3]	/* ploop56 = &loop56 */
+	ld8	ptable = [tmp4];;	/* ptable = &table */
+	add	tmp3 = ptable, sh1;;	/* tmp3 = &table + sh1 */
+	mov	ar.ec = MEMLAT + 1 + 1 /* one more pass needed */
+	ld8	tmp4 = [tmp3];;		/* tmp4 = loop offset */
+	sub	loopaddr = ploop56,tmp4	/* loopadd = &loop56 - loop offset */
+	ld8	r[1] = [asrc], 8;;	/* w0 */
 	mov	b6 = loopaddr;;
-	br	b6			// jump to the appropriate loop
+	br	b6			/* jump to the appropriate loop */

 	LOOP(8)
 	LOOP(16)
@@ -189,8 +189,8 @@ ENTRY(memmove)
 (p[MEMLAT])	st8	[dest] = r[MEMLAT], 8
 		br.ctop.dptk .l3
 .cpyfew:
-	cmp.eq	p6, p0 = len, r0	// is len == 0 ?
-	adds	len = -1, len		// --len;
+	cmp.eq	p6, p0 = len, r0	/* is len == 0 ? */
+	adds	len = -1, len		/* --len; */
 (p6)	br.cond.spnt	.restore_and_exit ;;
 	mov	ar.lc = len
 .l4:
@@ -199,36 +199,36 @@ ENTRY(memmove)
 	st1	[dest] = value, 1
 	br.cloop.dptk	.l4 ;;
 .restore_and_exit:
-	mov     pr = saved_pr, -1    	// restore the predicate registers
-	mov 	ar.lc = saved_lc	// restore the loop counter
+	mov     pr = saved_pr, -1    	/* restore the predicate registers */
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
 	br.ret.sptk.many b0

-// In the case of a backward copy, optimise only the case when everything
-// is a multiple of 8, otherwise copy byte by byte.  The backward copy is
-// used only when the blocks are overlapping and dest > src.
-
+/* In the case of a backward copy, optimise only the case when everything
+   is a multiple of 8, otherwise copy byte by byte.  The backward copy is
+   used only when the blocks are overlapping and dest > src.
+*/
 .backward:
-	shr.u	loopcnt = len, 3	// loopcnt = len / 8
-	add	src = src, len		// src points one byte past the end
-	add	dest = dest, len ;; 	// dest points one byte past the end
-	mov	ar.ec = MEMLAT + 1	// set the epilog counter
-	mov	pr.rot = 1 << 16	// set rotating predicates
-	adds	loopcnt = -1, loopcnt	// --loopcnt
-	cmp.ne	p6, p0 = tmp4, r0	// if ((dest | src | len) & 7 != 0)
-(p6)	br.cond.sptk .bytecopy ;;	// copy byte by byte backward
-	adds	src = -8, src		// src points to the last word
-	adds	dest = -8, dest 	// dest points to the last word
-	mov	ar.lc = loopcnt;;	// set the loop counter
+	shr.u	loopcnt = len, 3	/* loopcnt = len / 8 */
+	add	src = src, len		/* src points one byte past the end */
+	add	dest = dest, len ;; 	/* dest points one byte past the end */
+	mov	ar.ec = MEMLAT + 1	/* set the epilog counter */
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
+	cmp.ne	p6, p0 = tmp4, r0	/* if ((dest | src | len) & 7 != 0) */
+(p6)	br.cond.sptk .bytecopy ;;	/* copy byte by byte backward */
+	adds	src = -8, src		/* src points to the last word */
+	adds	dest = -8, dest 	/* dest points to the last word */
+	mov	ar.lc = loopcnt;;	/* set the loop counter */
 .l5:
 (p[0])		ld8	r[0] = [src], -8
 (p[MEMLAT])	st8	[dest] = r[MEMLAT], -8
 		br.ctop.dptk .l5
 		br.cond.sptk .restore_and_exit
 .bytecopy:
-	adds	src = -1, src		// src points to the last byte
-	adds	dest = -1, dest		// dest points to the last byte
-	adds	loopcnt = -1, len;;	// loopcnt = len - 1
-	mov	ar.lc = loopcnt;;	// set the loop counter
+	adds	src = -1, src		/* src points to the last byte */
+	adds	dest = -1, dest		/* dest points to the last byte */
+	adds	loopcnt = -1, len;;	/* loopcnt = len - 1 */
+	mov	ar.lc = loopcnt;;	/* set the loop counter */
 .l6:
 (p[0])		ld1	r[0] = [src], -1
 (p[MEMLAT])	st1	[dest] = r[MEMLAT], -1
@@ -239,7 +239,7 @@ END(memmove)
 	.rodata
 	.align 8
 .table:
-	data8	0			// dummy entry
+	data8	0			/* dummy entry */
 	data8 	.loop56 - .loop8
 	data8 	.loop56 - .loop16
 	data8 	.loop56 - .loop24

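The forward/backward comments in memmove above compress into a few lines of C. A sketch of the dispatch (illustrative only, not part of this commit; byte-wise for brevity, where the assembly copies whole words when dest, src, and len are all 8-byte multiples):

/* Illustrative sketch of memmove's direction choice; a backward copy
   is needed only when the regions overlap with dest above src. */
void *move (void *dest, const void *src, unsigned long len)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	if (d <= s || d >= s + len) {	/* no harmful overlap: copy forward */
		while (len-- != 0)
			*d++ = *s++;
	} else {			/* dest inside [src, src+len): backward */
		d += len;
		s += len;
		while (len-- != 0)
			*--d = *--s;
	}
	return dest;
}
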
+ 89 - 89
libc/string/ia64/memset.S

@@ -46,15 +46,15 @@
 #define ptr1		r28
 #define ptr2		r27
 #define ptr3		r26
-#define ptr9 		r24
+#define ptr9		r24
 #define	loopcnt		r23
 #define linecnt		r22
 #define bytecnt		r21

 #define fvalue		f6

-// This routine uses only scratch predicate registers (p6 - p15)
-#define p_scr		p6			// default register for same-cycle branches
+/* This routine uses only scratch predicate registers (p6 - p15) */
+#define p_scr		p6	/* default register for same-cycle branches */
 #define p_nz		p7
 #define p_zr		p8
 #define p_unalgn	p9
@@ -68,7 +68,7 @@
 #define MIN1		15
 #define MIN1P1HALF	8
 #define LINE_SIZE	128
-#define LSIZE_SH        7			// shift amount
+#define LSIZE_SH        7			/* shift amount */
 #define PREF_AHEAD	8

 #define USE_FLP
@@ -90,97 +90,97 @@ ENTRY(memset)
 	movi0	save_lc = ar.lc
 } { .mmi
 	.body
-	mov	ret0 = dest		// return value
-	cmp.ne	p_nz, p_zr = value, r0	// use stf.spill if value is zero
+	mov	ret0 = dest		/* return value */
+	cmp.ne	p_nz, p_zr = value, r0	/* use stf.spill if value is zero */
 	cmp.eq	p_scr, p0 = cnt, r0
 ;; }
 { .mmi
-	and	ptr2 = -(MIN1+1), dest	// aligned address
-	and	tmp = MIN1, dest	// prepare to check for alignment
-	tbit.nz p_y, p_n = dest, 0	// Do we have an odd address? (M_B_U)
+	and	ptr2 = -(MIN1+1), dest	/* aligned address */
+	and	tmp = MIN1, dest	/* prepare to check for alignment */
+	tbit.nz p_y, p_n = dest, 0	/* Do we have an odd address? (M_B_U) */
 } { .mib
 	mov	ptr1 = dest
-	mux1	value = value, @brcst	// create 8 identical bytes in word
-(p_scr)	br.ret.dpnt.many rp		// return immediately if count = 0
+	mux1	value = value, @brcst	/* create 8 identical bytes in word */
+(p_scr)	br.ret.dpnt.many rp		/* return immediately if count = 0 */
 ;; }
 { .mib
 	cmp.ne	p_unalgn, p0 = tmp, r0
-} { .mib				// NB: # of bytes to move is 1 higher
-	sub	bytecnt = (MIN1+1), tmp	//     than loopcnt
-	cmp.gt	p_scr, p0 = 16, cnt		// is it a minimalistic task?
-(p_scr)	br.cond.dptk.many .move_bytes_unaligned	// go move just a few (M_B_U)
+} { .mib				/* NB: # of bytes to move is 1 higher */
+	sub	bytecnt = (MIN1+1), tmp	/*     than loopcnt */
+	cmp.gt	p_scr, p0 = 16, cnt		/* is it a minimalistic task? */
+(p_scr)	br.cond.dptk.many .move_bytes_unaligned	/* go move just a few (M_B_U) */
 ;; }
 { .mmi
-(p_unalgn) add	ptr1 = (MIN1+1), ptr2		// after alignment
-(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		// after alignment
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	// should we do a st8 ?
+(p_unalgn) add	ptr1 = (MIN1+1), ptr2		/* after alignment */
+(p_unalgn) add	ptr2 = MIN1P1HALF, ptr2		/* after alignment */
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 3	/* should we do a st8 ? */
 ;; }
 { .mib
 (p_y)	add	cnt = -8, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	// should we do a st4 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 2	/* should we do a st4 ? */
 } { .mib
 (p_y)	st8	[ptr2] = value, -4
 (p_n)	add	ptr2 = 4, ptr2
 ;; }
 { .mib
 (p_yy)	add	cnt = -4, cnt
-(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	// should we do a st2 ?
+(p_unalgn) tbit.nz.unc p_y, p_n = bytecnt, 1	/* should we do a st2 ? */
 } { .mib
 (p_yy)	st4	[ptr2] = value, -2
 (p_nn)	add	ptr2 = 2, ptr2
 ;; }
 { .mmi
-	mov	tmp = LINE_SIZE+1		// for compare
+	mov	tmp = LINE_SIZE+1		/* for compare */
 (p_y)	add	cnt = -2, cnt
-(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	// should we do a st1 ?
+(p_unalgn) tbit.nz.unc p_yy, p_nn = bytecnt, 0	/* should we do a st1 ? */
 } { .mmi
-	setf.sig fvalue=value			// transfer value to FLP side
+	setf.sig fvalue=value			/* transfer value to FLP side */
 (p_y)	st2	[ptr2] = value, -1
 (p_n)	add	ptr2 = 1, ptr2
 ;; }

 { .mmi
 (p_yy)	st1	[ptr2] = value
-  	cmp.gt	p_scr, p0 = tmp, cnt		// is it a minimalistic task?
+	cmp.gt	p_scr, p0 = tmp, cnt		/* is it a minimalistic task? */
 } { .mbb
 (p_yy)	add	cnt = -1, cnt
-(p_scr)	br.cond.dpnt.many .fraction_of_line	// go move just a few
+(p_scr)	br.cond.dpnt.many .fraction_of_line	/* go move just a few */
 ;; }

 { .mib
 	nop.m 0
 	shr.u	linecnt = cnt, LSIZE_SH
-(p_zr)	br.cond.dptk.many .l1b			// Jump to use stf.spill
+(p_zr)	br.cond.dptk.many .l1b			/* Jump to use stf.spill */
 ;; }

 #ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
-	.align 32 // -------- //  L1A: store ahead into cache lines; fill later
+	.align 32 /* --------  L1A: store ahead into cache lines; fill later */
 #endif
 { .mmi
-	and	tmp = -(LINE_SIZE), cnt		// compute end of range
-	mov	ptr9 = ptr1			// used for prefetching
-	and	cnt = (LINE_SIZE-1), cnt	// remainder
+	and	tmp = -(LINE_SIZE), cnt		/* compute end of range */
+	mov	ptr9 = ptr1			/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
 } { .mmi
-	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
-	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
+	mov	loopcnt = PREF_AHEAD-1		/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
 ;; }
 { .mmi
-(p_scr)	add	loopcnt = -1, linecnt		// start of stores
-	add	ptr2 = 8, ptr1			// (beyond prefetch stores)
-	add	ptr1 = tmp, ptr1		// first address beyond total
-;; }						// range
+(p_scr)	add	loopcnt = -1, linecnt		/* start of stores */
+	add	ptr2 = 8, ptr1			/* (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1		/* first address beyond total */
+;; }						/* range */
 { .mmi
-	add	tmp = -1, linecnt		// next loop count
+	add	tmp = -1, linecnt		/* next loop count */
 	movi0	ar.lc = loopcnt
 ;; }
 .pref_l1a:
 { .mib
-	store [ptr9] = myval, 128	// Do stores one cache line apart
+	store [ptr9] = myval, 128	/* Do stores one cache line apart */
 	nop.i	0
 	br.cloop.dptk.few .pref_l1a
 ;; }
 { .mmi
-	add	ptr0 = 16, ptr2		// Two stores in parallel
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
 	movi0	ar.lc = tmp
 ;; }
 .l1ax:
@@ -211,7 +211,7 @@ ENTRY(memset)
  { .mmi
 	store [ptr2] = myval, 8
 	store [ptr0] = myval, 32
- 	cmp.lt	p_scr, p0 = ptr9, ptr1		// do we need more prefetching?
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
  ;; }
 { .mmb
 	store [ptr2] = myval, 24
@@ -219,9 +219,9 @@ ENTRY(memset)
 	br.cloop.dptk.few .l1ax
 ;; }
 { .mbb
-	cmp.le  p_scr, p0 = 8, cnt		// just a few bytes left ?
-(p_scr) br.cond.dpnt.many  .fraction_of_line	// Branch no. 2
-	br.cond.dpnt.many  .move_bytes_from_alignment	// Branch no. 3
+	cmp.le  p_scr, p0 = 8, cnt		/* just a few bytes left ? */
+(p_scr) br.cond.dpnt.many  .fraction_of_line	/* Branch no. 2 */
+	br.cond.dpnt.many  .move_bytes_from_alignment	/* Branch no. 3 */
 ;; }

 #ifdef GAS_ALIGN_BREAKS_UNWIND_INFO
@@ -229,32 +229,32 @@ ENTRY(memset)
 #else
 	.align 32
 #endif
-.l1b:	// ------------------ //  L1B: store ahead into cache lines; fill later
+.l1b:	/* ------------------  L1B: store ahead into cache lines; fill later */
 { .mmi
-	and	tmp = -(LINE_SIZE), cnt		// compute end of range
-	mov	ptr9 = ptr1			// used for prefetching
-	and	cnt = (LINE_SIZE-1), cnt	// remainder
+	and	tmp = -(LINE_SIZE), cnt		/* compute end of range */
+	mov	ptr9 = ptr1			/* used for prefetching */
+	and	cnt = (LINE_SIZE-1), cnt	/* remainder */
 } { .mmi
-	mov	loopcnt = PREF_AHEAD-1		// default prefetch loop
-	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	// check against actual value
+	mov	loopcnt = PREF_AHEAD-1		/* default prefetch loop */
+	cmp.gt	p_scr, p0 = PREF_AHEAD, linecnt	/* check against actual value */
 ;; }
 { .mmi
 (p_scr)	add	loopcnt = -1, linecnt
-	add	ptr2 = 16, ptr1	// start of stores (beyond prefetch stores)
-	add	ptr1 = tmp, ptr1	// first address beyond total range
+	add	ptr2 = 16, ptr1	/* start of stores (beyond prefetch stores) */
+	add	ptr1 = tmp, ptr1	/* first address beyond total range */
 ;; }
 { .mmi
-	add	tmp = -1, linecnt	// next loop count
+	add	tmp = -1, linecnt	/* next loop count */
 	movi0	ar.lc = loopcnt
 ;; }
 .pref_l1b:
 { .mib
-	stf.spill [ptr9] = f0, 128	// Do stores one cache line apart
+	stf.spill [ptr9] = f0, 128	/* Do stores one cache line apart */
 	nop.i   0
 	br.cloop.dptk.few .pref_l1b
 ;; }
 { .mmi
-	add	ptr0 = 16, ptr2		// Two stores in parallel
+	add	ptr0 = 16, ptr2		/* Two stores in parallel */
 	movi0	ar.lc = tmp
 ;; }
 .l1bx:
@@ -269,7 +269,7 @@ ENTRY(memset)
  { .mmi
 	stf.spill [ptr2] = f0, 32
 	stf.spill [ptr0] = f0, 64
- 	cmp.lt	p_scr, p0 = ptr9, ptr1	// do we need more prefetching?
+	cmp.lt	p_scr, p0 = ptr9, ptr1	/* do we need more prefetching? */
  ;; }
 { .mmb
 	stf.spill [ptr2] = f0, 32
@@ -277,14 +277,14 @@ ENTRY(memset)
 	br.cloop.dptk.few .l1bx
 ;; }
 { .mib
-	cmp.gt  p_scr, p0 = 8, cnt	// just a few bytes left ?
+	cmp.gt  p_scr, p0 = 8, cnt	/* just a few bytes left ? */
 (p_scr)	br.cond.dpnt.many  .move_bytes_from_alignment
 ;; }

 .fraction_of_line:
 { .mib
 	add	ptr2 = 16, ptr1
-	shr.u	loopcnt = cnt, 5   	// loopcnt = cnt / 32
+	shr.u	loopcnt = cnt, 5	/* loopcnt = cnt / 32 */
 ;; }
 { .mib
 	cmp.eq	p_scr, p0 = loopcnt, r0
@@ -292,13 +292,13 @@ ENTRY(memset)
 (p_scr)	br.cond.dpnt.many store_words
 ;; }
 { .mib
-	and	cnt = 0x1f, cnt		// compute the remaining cnt
+	and	cnt = 0x1f, cnt		/* compute the remaining cnt */
 	movi0   ar.lc = loopcnt
 ;; }
 #ifndef GAS_ALIGN_BREAKS_UNWIND_INFO
 	.align 32
 #endif
-.l2:	// ---------------------------- //  L2A:  store 32B in 2 cycles
+.l2:	/* ----------------------------  L2A:  store 32B in 2 cycles */
 { .mmb
 	store	[ptr1] = myval, 8
 	store	[ptr2] = myval, 8
@@ -309,34 +309,34 @@ ENTRY(memset)
 ;; }
 store_words:
 { .mib
-	cmp.gt	p_scr, p0 = 8, cnt		// just a few bytes left ?
-(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	// Branch
+	cmp.gt	p_scr, p0 = 8, cnt		/* just a few bytes left ? */
+(p_scr)	br.cond.dpnt.many .move_bytes_from_alignment	/* Branch */
 ;; }

 { .mmi
-	store	[ptr1] = myval, 8		// store
-	cmp.le	p_y, p_n = 16, cnt		//
-	add	cnt = -8, cnt			// subtract
+	store	[ptr1] = myval, 8		/* store */
+	cmp.le	p_y, p_n = 16, cnt		/* */
+	add	cnt = -8, cnt			/* subtract */
 ;; }
 { .mmi
-(p_y)	store	[ptr1] = myval, 8		// store
-(p_y)	cmp.le.unc p_yy, p_nn = 16, cnt		//
-(p_y)	add	cnt = -8, cnt			// subtract
+(p_y)	store	[ptr1] = myval, 8		/* store */
+(p_y)	cmp.le.unc p_yy, p_nn = 16, cnt		/* */
+(p_y)	add	cnt = -8, cnt			/* subtract */
 ;; }
-{ .mmi						// store
-(p_yy)	store	[ptr1] = myval, 8		//
-(p_yy)	add	cnt = -8, cnt			// subtract
+{ .mmi						/* store */
+(p_yy)	store	[ptr1] = myval, 8		/* */
+(p_yy)	add	cnt = -8, cnt			/* subtract */
 ;; }

 .move_bytes_from_alignment:
 { .mib
 	cmp.eq	p_scr, p0 = cnt, r0
-	tbit.nz.unc p_y, p0 = cnt, 2	// should we terminate with a st4 ?
+	tbit.nz.unc p_y, p0 = cnt, 2	/* should we terminate with a st4 ? */
 (p_scr)	br.cond.dpnt.few .restore_and_exit
 ;; }
 { .mib
 (p_y)	st4	[ptr1] = value, 4
-	tbit.nz.unc p_yy, p0 = cnt, 1	// should we terminate with a st2 ?
+	tbit.nz.unc p_yy, p0 = cnt, 1	/* should we terminate with a st2 ? */
 ;; }
 { .mib
 (p_yy)	st2	[ptr1] = value, 2
@@ -362,38 +362,38 @@ store_words:
 (p_n)	add	ptr2 = 2, ptr1
 } { .mmi
 (p_y)	add	ptr2 = 3, ptr1
-(p_y)	st1	[ptr1] = value, 1	// fill 1 (odd-aligned) byte
-(p_y)	add	cnt = -1, cnt		// [15, 14 (or less) left]
+(p_y)	st1	[ptr1] = value, 1	/* fill 1 (odd-aligned) byte */
+(p_y)	add	cnt = -1, cnt		/* [15, 14 (or less) left] */
 ;; }
 { .mmi
 (p_yy)	cmp.le.unc p_y, p0 = 8, cnt
-	add	ptr3 = ptr1, cnt	// prepare last store
+	add	ptr3 = ptr1, cnt	/* prepare last store */
 	movi0	ar.lc = save_lc
 } { .mmi
-(p_yy)	st2	[ptr1] = value, 4	// fill 2 (aligned) bytes
-(p_yy)	st2	[ptr2] = value, 4	// fill 2 (aligned) bytes
-(p_yy)	add	cnt = -4, cnt		// [11, 10 (o less) left]
+(p_yy)	st2	[ptr1] = value, 4	/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = value, 4	/* fill 2 (aligned) bytes */
+(p_yy)	add	cnt = -4, cnt		/* [11, 10 (or less) left] */
 ;; }
 { .mmi
 (p_y)	cmp.le.unc p_yy, p0 = 8, cnt
-	add	ptr3 = -1, ptr3		// last store
-	tbit.nz p_scr, p0 = cnt, 1	// will there be a st2 at the end ?
+	add	ptr3 = -1, ptr3		/* last store */
+	tbit.nz p_scr, p0 = cnt, 1	/* will there be a st2 at the end ? */
 } { .mmi
-(p_y)	st2	[ptr1] = value, 4	// fill 2 (aligned) bytes
-(p_y)	st2	[ptr2] = value, 4	// fill 2 (aligned) bytes
-(p_y)	add	cnt = -4, cnt		// [7, 6 (or less) left]
+(p_y)	st2	[ptr1] = value, 4	/* fill 2 (aligned) bytes */
+(p_y)	st2	[ptr2] = value, 4	/* fill 2 (aligned) bytes */
+(p_y)	add	cnt = -4, cnt		/* [7, 6 (or less) left] */
 ;; }
 { .mmi
-(p_yy)	st2	[ptr1] = value, 4	// fill 2 (aligned) bytes
-(p_yy)	st2	[ptr2] = value, 4	// fill 2 (aligned) bytes
-					// [3, 2 (or less) left]
-	tbit.nz p_y, p0 = cnt, 0	// will there be a st1 at the end ?
+(p_yy)	st2	[ptr1] = value, 4	/* fill 2 (aligned) bytes */
+(p_yy)	st2	[ptr2] = value, 4	/* fill 2 (aligned) bytes */
+					/* [3, 2 (or less) left] */
+	tbit.nz p_y, p0 = cnt, 0	/* will there be a st1 at the end ? */
 } { .mmi
 (p_yy)	add	cnt = -4, cnt
 ;; }
 { .mmb
-(p_scr)	st2	[ptr1] = value		// fill 2 (aligned) bytes
-(p_y)	st1	[ptr3] = value		// fill last byte (using ptr3)
+(p_scr)	st2	[ptr1] = value		/* fill 2 (aligned) bytes */
+(p_y)	st1	[ptr3] = value		/* fill last byte (using ptr3) */
 	br.ret.sptk.many rp
 ;; }
 END(memset)

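The st8/st4/st2/st1 ladder near the top of memset (the "should we do a stN?" tests) peels off just enough bytes to reach a 16-byte boundary, one bit of the remaining distance per store size. The same idea in C (illustrative only, not part of this commit; 'value' is the fill byte already replicated into all eight lanes, as mux1 does above, and cnt is assumed large enough to cover the head, since the "minimalistic task" branch handles short fills):

/* Illustrative head-alignment ladder; each store is naturally aligned
   because the smaller stores run first. */
unsigned char *set_head (unsigned char *p, unsigned long value)
{
	unsigned long head = -(unsigned long) p & 15; /* bytes to 16B boundary */

	if (head & 1) { *p = (unsigned char) value; p += 1; }
	if (head & 2) { *(unsigned short *) p = (unsigned short) value; p += 2; }
	if (head & 4) { *(unsigned int *) p = (unsigned int) value; p += 4; }
	if (head & 8) { *(unsigned long *) p = value; p += 8; }
	return p;
}

The assembly runs the tests in the opposite order and juggles two pointers so the stores can dual-issue, but the bytes written are the same.
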
+ 16 - 16
libc/string/ia64/strchr.S

@@ -49,15 +49,15 @@ ENTRY(strchr)
 	.prologue
 	alloc r2 = ar.pfs, 2, 0, 0, 0
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc 	// save the loop counter
+        mov 	saved_lc = ar.lc 	/* save the loop counter */
 	.body
 	mov 	ret0 = str	
-	and 	tmp = 7, str		// tmp = str % 8
+	and 	tmp = 7, str		/* tmp = str % 8 */
 	mux1	chrx8 = chr, @brcst
-	extr.u	chr = chr, 0, 8		// retain only the last byte
-	cmp.ne	p8, p0 = r0, r0		// clear p8
+	extr.u	chr = chr, 0, 8		/* retain only the last byte */
+	cmp.ne	p8, p0 = r0, r0		/* clear p8 */
 	;;
-	sub	loopcnt = 8, tmp	// loopcnt = 8 - tmp
+	sub	loopcnt = 8, tmp	/* loopcnt = 8 - tmp */
 	cmp.eq	p6, p0 = tmp, r0
 (p6)	br.cond.sptk	.str_aligned;;
 	adds	loopcnt = -1, loopcnt;;
@@ -75,10 +75,10 @@ ENTRY(strchr)
 	nop.b	0
 	nop.b 	0
 .l2:	
-	ld8.s	val2 = [ret0], 8	// don't bomb out here
+	ld8.s	val2 = [ret0], 8	/* don't bomb out here */
 	czx1.r	pos0 = val1	
-	xor	tmp = val1, chrx8	// if val1 contains chr, tmp will
-	;;				// contain a zero in its position
+	xor	tmp = val1, chrx8	/* if val1 contains chr, tmp will */
+	;;				/* contain a zero in its position */
 	czx1.r	poschr = tmp
 	cmp.ne	p6, p0 = 8, pos0
 	;;
@@ -90,21 +90,21 @@ ENTRY(strchr)
 	mov	val1 = val2	
 	br.cond.dptk .l2
 .foundit:
-(p6)	cmp.lt	p8, p0 = pos0, poschr	// we found chr and null in the word
-(p8)	br.cond.spnt .notfound		// null was found before chr
+(p6)	cmp.lt	p8, p0 = pos0, poschr	/* we found chr and null in the word */
+(p8)	br.cond.spnt .notfound		/* null was found before chr */
 	add	ret0 = ret0, poschr ;;
-	adds	ret0 = -15, ret0 ;;	// should be -16, but we decrement
-.restore_and_exit:			// ret0 in the next instruction
-	adds	ret0 = -1, ret0		// ret0 was pointing 1 char too far
-	mov 	ar.lc = saved_lc	// restore the loop counter
+	adds	ret0 = -15, ret0 ;;	/* should be -16, but we decrement */
+.restore_and_exit:			/* ret0 in the next instruction */
+	adds	ret0 = -1, ret0		/* ret0 was pointing 1 char too far */
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
 	br.ret.sptk.many b0
 .notfound:
-	mov	ret0 = r0		// return NULL if null was found
+	mov	ret0 = r0		/* return NULL if null was found */
 	mov 	ar.lc = saved_lc
 	br.ret.sptk.many b0
 .recovery:
 	adds	ret0 = -8, ret0;;
-	ld8	val2 = [ret0], 8	// bomb out here
+	ld8	val2 = [ret0], 8	/* bomb out here */
 	br.cond.sptk	.back
 END(strchr)
 libc_hidden_def (strchr)

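strchr's word loop leans on two ia64 idioms: czx1.r, which yields the index of the first zero byte in a word (8 if there is none), and the XOR against chr broadcast into every byte (mux1 ..., @brcst), which, as the comment above says, turns an occurrence of chr into a zero byte. czx1 has no direct C counterpart, but the classic portable equivalent of the whole test looks like this (illustrative only, not part of this commit):

#define ONES  0x0101010101010101UL
#define HIGHS 0x8080808080808080UL
/* Nonzero iff some byte of w is zero (exact, no false positives). */
#define HAS_ZERO_BYTE(w) (((w) - ONES) & ~(w) & HIGHS)

int word_contains (unsigned long word, unsigned char chr)
{
	unsigned long chrx8 = (unsigned long) chr * ONES; /* like mux1 @brcst */

	/* a zero byte in word^chrx8 marks an occurrence of chr in word */
	return HAS_ZERO_BYTE (word ^ chrx8) != 0;
}

The assembly then compares the two zero positions (pos0 for the terminator, poschr for the match) to decide whether the string ended before chr was found.
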
+ 1 - 1
libc/string/ia64/strcmp.S

@@ -42,7 +42,7 @@ ENTRY(strcmp)
 .loop:
 	ld1	val1 = [s1], 1
 	ld1	val2 = [s2], 1
-	cmp.eq	p6, p0 = r0, r0		// set p6
+	cmp.eq	p6, p0 = r0, r0		/* set p6 */
 	;;
 	cmp.ne.and p6, p0 = val1, r0
 	cmp.ne.and p6, p0 = val2, r0

+ 33 - 33
libc/string/ia64/strcpy.S

@@ -27,8 +27,8 @@
    In this form, it assumes little endian mode.  For big endian mode, the
    the two shifts in .l2 must be inverted:

-	shl   	value = r[1], sh1   	// value = w0 << sh1
-	shr.u   tmp = r[0], sh2   	// tmp = w1 >> sh2
+	shl	value = r[1], sh1	// value = w0 << sh1
+	shr.u   tmp = r[0], sh2		// tmp = w1 >> sh2
  */

 #include "sysdep.h"
@@ -53,62 +53,62 @@

 ENTRY(strcpy)
 	.prologue
-	alloc 	r2 = ar.pfs, 2, 0, 30, 32
+	alloc	r2 = ar.pfs, 2, 0, 30, 32

 #define MEMLAT 2
 	.rotr	r[MEMLAT + 2]
 	.rotp	p[MEMLAT + 1]

-	mov	ret0 = in0		// return value = dest
+	mov	ret0 = in0		/* return value = dest */
 	.save pr, saved_pr
-	mov	saved_pr = pr           // save the predicate registers
+	mov	saved_pr = pr           /* save the predicate registers */
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc	// save the loop counter
+        mov	saved_lc = ar.lc	/* save the loop counter */
 	.body
-	sub	tmp = r0, in0 ;;	// tmp = -dest
-	mov 	dest = in0		// dest
-	mov 	src = in1		// src
-	and	loopcnt = 7, tmp ;;	// loopcnt = -dest % 8
+	sub	tmp = r0, in0 ;;	/* tmp = -dest */
+	mov	dest = in0		/* dest */
+	mov	src = in1		/* src */
+	and	loopcnt = 7, tmp ;;	/* loopcnt = -dest % 8 */
 	cmp.eq	p6, p0 = loopcnt, r0
-	adds	loopcnt = -1, loopcnt	// --loopcnt
+	adds	loopcnt = -1, loopcnt	/* --loopcnt */
 (p6)	br.cond.sptk .dest_aligned ;;
 	mov	ar.lc = loopcnt
-.l1:					// copy -dest % 8 bytes
-	ld1	c = [src], 1		// c = *src++
+.l1:					/* copy -dest % 8 bytes */
+	ld1	c = [src], 1		/* c = *src++ */
 	;;
-	st1	[dest] = c, 1		// *dest++ = c
+	st1	[dest] = c, 1		/* *dest++ = c */
 	cmp.eq	p6, p0 = c, r0
 (p6)	br.cond.dpnt .restore_and_exit
 	br.cloop.dptk .l1 ;;
 .dest_aligned:
-	and	sh1 = 7, src 		// sh1 = src % 8
-	mov	ar.lc = -1		// "infinite" loop
-	and	asrc = -8, src ;;	// asrc = src & -OPSIZ  -- align src
+	and	sh1 = 7, src		/* sh1 = src % 8 */
+	mov	ar.lc = -1		/* "infinite" loop */
+	and	asrc = -8, src ;;	/* asrc = src & -OPSIZ  -- align src */
 	sub	thresh = 8, sh1
-	mov	pr.rot = 1 << 16	// set rotating predicates
-	cmp.ne	p7, p0 = r0, r0		// clear p7
-	shl	sh1 = sh1, 3 ;;		// sh1 = 8 * (src % 8)
-	sub	sh2 = 64, sh1		// sh2 = 64 - sh1
-	cmp.eq  p6, p0 = sh1, r0 	// is the src aligned?
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	cmp.ne	p7, p0 = r0, r0		/* clear p7 */
+	shl	sh1 = sh1, 3 ;;		/* sh1 = 8 * (src % 8) */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	cmp.eq  p6, p0 = sh1, r0	/* is the src aligned? */
 (p6)    br.cond.sptk .src_aligned ;;
 	ld8	r[1] = [asrc],8 ;;

 	.align	32
 .l2:
 	ld8.s	r[0] = [asrc], 8
-	shr.u	value = r[1], sh1 ;; 	// value = w0 >> sh1
-	czx1.r	pos = value ;;		// do we have an "early" zero
-	cmp.lt	p7, p0 = pos, thresh	// in w0 >> sh1?
+	shr.u	value = r[1], sh1 ;;	/* value = w0 >> sh1 */
+	czx1.r	pos = value ;;		/* do we have an "early" zero */
+	cmp.lt	p7, p0 = pos, thresh	/* in w0 >> sh1? */
 (p7)	br.cond.dpnt .found0
-	chk.s	r[0], .recovery2	// it is safe to do that only
-.back2:					// after the previous test
-	shl	tmp = r[0], sh2  	// tmp = w1 << sh2
+	chk.s	r[0], .recovery2	/* it is safe to do that only */
+.back2:					/* after the previous test */
+	shl	tmp = r[0], sh2		/* tmp = w1 << sh2 */
 	;;
-	or	value = value, tmp ;;	// value |= tmp
+	or	value = value, tmp ;;	/* value |= tmp */
 	czx1.r	pos = value ;;
 	cmp.ne	p7, p0 = 8, pos
 (p7)	br.cond.dpnt .found0
-	st8	[dest] = value, 8	// store val to dest
+	st8	[dest] = value, 8	/* store val to dest */
 	br.ctop.dptk    .l2 ;;
 .src_aligned:
 .l3:
@@ -124,14 +124,14 @@ ENTRY(strcpy)
 .found0:
 	mov	ar.lc = pos
 .l4:
-	extr.u	c = value, 0, 8		// c = value & 0xff
+	extr.u	c = value, 0, 8		/* c = value & 0xff */
 	shr.u	value = value, 8
 	;;
 	st1	[dest] = c, 1
 	br.cloop.dptk	.l4 ;;
 .restore_and_exit:
-	mov 	ar.lc = saved_lc	// restore the loop counter
-	mov	pr = saved_pr, -1	// restore the predicate registers
+	mov	ar.lc = saved_lc	/* restore the loop counter */
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
 	br.ret.sptk.many b0
 .recovery2:
 	add	tmp = -8, asrc ;;

+ 9 - 9
libc/string/ia64/strlen.S

@@ -50,13 +50,13 @@ ENTRY(strlen)
 	.prologue
 	alloc r2 = ar.pfs, 1, 0, 0, 0
 	.save ar.lc, saved_lc
-        mov 	saved_lc = ar.lc 	// save the loop counter
+        mov 	saved_lc = ar.lc 	/* save the loop counter */
 	.body
 	mov 	str = in0	
-	mov 	len = r0		// len = 0
-	and 	tmp = 7, in0		// tmp = str % 8
+	mov 	len = r0		/* len = 0 */
+	and 	tmp = 7, in0		/* tmp = str % 8 */
 	;;
-	sub	loopcnt = 8, tmp	// loopcnt = 8 - tmp
+	sub	loopcnt = 8, tmp	/* loopcnt = 8 - tmp */
 	cmp.eq	p6, p0 = tmp, r0
 (p6)	br.cond.sptk	.str_aligned;;
 	adds	loopcnt = -1, loopcnt;;
@@ -69,11 +69,11 @@ ENTRY(strlen)
 	adds	len = 1, len
 	br.cloop.dptk	.l1
 .str_aligned:
-	mov	origadd = str		// origadd = orig
+	mov	origadd = str		/* origadd = orig */
 	ld8	val1 = [str], 8;;
 	nop.b	0
 	nop.b 	0
-.l2:	ld8.s	val2 = [str], 8		// don't bomb out here
+.l2:	ld8.s	val2 = [str], 8		/* don't bomb out here */
 	czx1.r	pos0 = val1	
 	;;
 	cmp.ne	p6, p0 = 8, pos0
@@ -83,16 +83,16 @@ ENTRY(strlen)
 	mov	val1 = val2	
 	br.cond.dptk	.l2
 .foundit:
-	sub	tmp = str, origadd	// tmp = crt address - orig
+	sub	tmp = str, origadd	/* tmp = crt address - orig */
 	add	len = len, pos0;;
 	add	len = len, tmp;;
 	adds	len = -16, len
 .restore_and_exit:
-	mov ar.lc = saved_lc		// restore the loop counter
+	mov ar.lc = saved_lc		/* restore the loop counter */
 	br.ret.sptk.many b0
 .recovery:
 	adds	str = -8, str;;
-	ld8	val2 = [str], 8		// bomb out here
+	ld8	val2 = [str], 8		/* bomb out here */
 	br.cond.sptk	.back
 END(strlen)
 libc_hidden_def (strlen)

+ 5 - 5
libc/string/ia64/strncmp.S

@@ -23,7 +23,7 @@
    Inputs:
         in0:    s1
         in1:    s2
-  	in2:	n
+	in2:	n

    Unlike memcmp(), this function is optimized for mismatches within the
    first few characters.  */
@@ -42,13 +42,13 @@
 ENTRY(strncmp)
 	alloc	r2 = ar.pfs, 3, 0, 0, 0
 	mov	ret0 = r0
-	cmp.eq  p6, p0 = r0, r0		// set p6
-	cmp.eq	p7, p0 = n, r0		// return immediately if n == 0
+	cmp.eq  p6, p0 = r0, r0		/* set p6 */
+	cmp.eq	p7, p0 = n, r0		/* return immediately if n == 0 */
 (p7)	br.cond.spnt .restore_and_exit ;;
 .loop:
 	ld1	val1 = [s1], 1
 	ld1	val2 = [s2], 1
-	adds	n = -1, n		// n--
+	adds	n = -1, n		/* n-- */
 	;;
 	cmp.ne.and p6, p0 = val1, r0
 	cmp.ne.and p6, p0 = val2, r0
@@ -58,5 +58,5 @@ ENTRY(strncmp)
 	sub	ret0 = val1, val2
 .restore_and_exit:
 	br.ret.sptk.many b0
-END(strncmp)	
+END(strncmp)
 libc_hidden_weak (strncmp)

+ 45 - 45
libc/string/ia64/strncpy.S

@@ -58,64 +58,64 @@ ENTRY(strncpy)
 	.rotr	r[MEMLAT + 2]
 	.rotp	p[MEMLAT + 1]

-	mov	ret0 = in0		// return value = dest
+	mov	ret0 = in0		/* return value = dest */
 	.save pr, saved_pr
-	mov	saved_pr = pr           // save the predicate registers
+	mov	saved_pr = pr           /* save the predicate registers */
 	.save ar.lc, saved_lc
-	mov 	saved_lc = ar.lc	// save the loop counter
-	mov	ar.ec = 0		// ec is not guaranteed to
-					// be zero upon function entry
+	mov 	saved_lc = ar.lc	/* save the loop counter */
+	mov	ar.ec = 0		/* ec is not guaranteed to */
+					/* be zero upon function entry */
 	.body
 	cmp.geu p6, p5 = 24, in2
 (p6)	br.cond.spnt .short_len
-	sub	tmp = r0, in0 ;;	// tmp = -dest
-	mov	len = in2		// len
-	mov 	dest = in0		// dest
-	mov 	src = in1		// src
-	and	tmp = 7, tmp ;;		// loopcnt = -dest % 8
+	sub	tmp = r0, in0 ;;	/* tmp = -dest */
+	mov	len = in2		/* len */
+	mov 	dest = in0		/* dest */
+	mov 	src = in1		/* src */
+	and	tmp = 7, tmp ;;		/* loopcnt = -dest % 8 */
 	cmp.eq	p6, p7 = tmp, r0
-	adds	loopcnt = -1, tmp	// --loopcnt
+	adds	loopcnt = -1, tmp	/* --loopcnt */
 (p6)	br.cond.sptk .dest_aligned ;;
-	sub	len = len, tmp		// len -= -dest % 8
+	sub	len = len, tmp		/* len -= -dest % 8 */
 	mov	ar.lc = loopcnt
-.l1:					// copy -dest % 8 bytes
-(p5)	ld1	c = [src], 1		// c = *src++
+.l1:					/* copy -dest % 8 bytes */
+(p5)	ld1	c = [src], 1		/* c = *src++ */
 	;;
-	st1	[dest] = c, 1		// *dest++ = c
+	st1	[dest] = c, 1		/* *dest++ = c */
 	cmp.ne	p5, p7 = c, r0
 	br.cloop.dptk .l1 ;;
 (p7)	br.cond.dpnt	.found0_align

-.dest_aligned:				// p7 should be cleared here
-	shr.u	c = len, 3		// c = len / 8
-	and	sh1 = 7, src 		// sh1 = src % 8
-	and	asrc = -8, src ;;	// asrc = src & -OPSIZ  -- align src
-	adds	c = (MEMLAT-1), c	// c = (len / 8) + MEMLAT - 1
+.dest_aligned:				/* p7 should be cleared here */
+	shr.u	c = len, 3		/* c = len / 8 */
+	and	sh1 = 7, src 		/* sh1 = src % 8 */
+	and	asrc = -8, src ;;	/* asrc = src & -OPSIZ  -- align src */
+	adds	c = (MEMLAT-1), c	/* c = (len / 8) + MEMLAT - 1 */
 	sub	thresh = 8, sh1
-	mov	pr.rot = 1 << 16	// set rotating predicates
-	shl	sh1 = sh1, 3 ;;		// sh1 = 8 * (src % 8)
-	mov	ar.lc = c		// "infinite" loop
-	sub	sh2 = 64, sh1		// sh2 = 64 - sh1
-	cmp.eq  p6, p0 = sh1, r0 	// is the src aligned?
+	mov	pr.rot = 1 << 16	/* set rotating predicates */
+	shl	sh1 = sh1, 3 ;;		/* sh1 = 8 * (src % 8) */
+	mov	ar.lc = c		/* "infinite" loop */
+	sub	sh2 = 64, sh1		/* sh2 = 64 - sh1 */
+	cmp.eq  p6, p0 = sh1, r0 	/* is the src aligned? */
 (p6)    br.cond.sptk .src_aligned
-	adds	c = -(MEMLAT-1), c ;;	// c = (len / 8)
+	adds	c = -(MEMLAT-1), c ;;	/* c = (len / 8) */
 	ld8	r[1] = [asrc],8
 	mov	ar.lc = c ;;

 	.align	32
 .l2:
-(p6)	st8	[dest] = value, 8	// store val to dest
+(p6)	st8	[dest] = value, 8	/* store val to dest */
 	ld8.s	r[0] = [asrc], 8
-	shr.u	value = r[1], sh1 ;; 	// value = w0 >> sh1
-	czx1.r	pos = value ;;		// do we have an "early" zero
-	cmp.lt	p7, p0 = pos, thresh	// in w0 >> sh1?
-	adds	len = -8, len		// len -= 8
+	shr.u	value = r[1], sh1 ;; 	/* value = w0 >> sh1 */
+	czx1.r	pos = value ;;		/* do we have an "early" zero */
+	cmp.lt	p7, p0 = pos, thresh	/* in w0 >> sh1? */
+	adds	len = -8, len		/* len -= 8 */
 (p7)	br.cond.dpnt .nonalign_found0
-	chk.s	r[0], .recovery2	// it is safe to do that only
-.back2:					// after the previous test
-	shl	tmp = r[0], sh2  	// tmp = w1 << sh2
+	chk.s	r[0], .recovery2	/* it is safe to do that only */
+.back2:					/* after the previous test */
+	shl	tmp = r[0], sh2  	/* tmp = w1 << sh2 */
 	;;
-	or	value = value, tmp ;;	// value |= tmp
+	or	value = value, tmp ;;	/* value |= tmp */
 	czx1.r	pos = value ;;
 	cmp.ne	p7, p6 = 8, pos
 (p7)	br.cond.dpnt .nonalign_found0
@@ -137,7 +137,7 @@ ENTRY(strncpy)
 (p[MEMLAT])	mov	value = r[MEMLAT]
 (p[MEMLAT])	czx1.r	pos = r[MEMLAT] ;;
 (p[MEMLAT])	cmp.ne	p7, p0 = 8, pos
-(p[MEMLAT])	adds	len = -8, len	// len -= 8
+(p[MEMLAT])	adds	len = -8, len	/* len -= 8 */
 (p7)		br.cond.dpnt .found0
 (p[MEMLAT])	st8	[dest] = r[MEMLAT], 8
 		br.ctop.dptk .l3 ;;
@@ -152,7 +152,7 @@ ENTRY(strncpy)
 (p5)	br.cond.dptk	.restore_and_exit ;;
 	mov	ar.lc = len
 .l4:
-(p6)	extr.u	c = value, 0, 8		// c = value & 0xff
+(p6)	extr.u	c = value, 0, 8		/* c = value & 0xff */
 (p6)	shr.u	value = value, 8 ;;
 	st1	[dest] = c, 1
 	cmp.ne	p6, p0 = c, r0
@@ -165,7 +165,7 @@ ENTRY(strncpy)
 	mov	value = 0 ;;
 .found0:
 	shl	tmp = pos, 3
-	shr.u	loopcnt = len, 4	// loopcnt = len / 16
+	shr.u	loopcnt = len, 4	/* loopcnt = len / 16 */
 	mov	c = -1 ;;
 	cmp.eq	p6, p0 = loopcnt, r0
 	adds	loopcnt = -1, loopcnt
@@ -192,24 +192,24 @@ ENTRY(strncpy)
 	st1	[dest] = r0, 1
 	br.cloop.dptk	.l7 ;;
 .restore_and_exit:
-	mov 	ar.lc = saved_lc	// restore the loop counter
-	mov	pr = saved_pr, -1	// restore the predicate registers
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
 	br.ret.sptk.many b0

 .short_len:
 	cmp.eq	p5, p0 = in2, r0
 	adds	loopcnt = -1, in2
 (p5)	br.cond.spnt .restore_and_exit ;;
-	mov	ar.lc = loopcnt		// p6 should be set when we get here
+	mov	ar.lc = loopcnt		/* p6 should be set when we get here */
 .l8:
-(p6)	ld1	c = [in1], 1		// c = *src++
+(p6)	ld1	c = [in1], 1		/* c = *src++ */
 	;;
-	st1	[in0] = c, 1		// *dest++ = c
+	st1	[in0] = c, 1		/* *dest++ = c */
 (p6)	cmp.ne	p6, p0 = c, r0
 	br.cloop.dptk .l8
 	;;
-	mov 	ar.lc = saved_lc	// restore the loop counter
-	mov	pr = saved_pr, -1	// restore the predicate registers
+	mov 	ar.lc = saved_lc	/* restore the loop counter */
+	mov	pr = saved_pr, -1	/* restore the predicate registers */
 	br.ret.sptk.many b0
 .recovery2:
 	add	c = 8, len

+ 1 - 1
libc/string/sh64/memcpy.S

@@ -151,7 +151,7 @@ Large:
 	add r2, r4, r5
 	ldlo.q r3, 0, r0
 	addi r5, -16, r5
-	movi 64+8, r27 // could subtract r7 from that.
+	movi 64+8, r27 /* could subtract r7 from that. */
 	stlo.q r2, 0, r0
 	sthi.q r2, 7, r0
 	ldx.q r22, r6, r0

+ 15 - 14
libc/string/sh64/memset.S

@@ -32,12 +32,12 @@ memset:
 	ptabs r18, tr2
 	mshflo.b r3,r3,r3
 	add r4, r22, r23
-	mperm.w r3, r63, r3	// Fill pattern now in every byte of r3
+	mperm.w r3, r63, r3	/* Fill pattern now in every byte of r3 */

 	movi 8, r9
-	bgtu/u r23, r9, tr0 // multiquad
+	bgtu/u r23, r9, tr0 /* multiquad */

-	beqi/u r4, 0, tr2       // Return with size 0 - ensures no mem accesses
+	beqi/u r4, 0, tr2       /* Return with size 0 - ensures no mem accesses */
 	ldlo.q r2, 0, r7
 	shlli r4, 2, r4
 	movi -1, r8
@@ -52,20 +52,21 @@ multiquad:
 	stlo.q r2, 0, r3
 	shlri r23, 3, r24
 	add r2, r4, r5
-	beqi/u r24, 1, tr0 // lastquad
+	beqi/u r24, 1, tr0 /* lastquad */
 	pta/l loop, tr1
 	sub r2, r22, r25
-	andi r5, -8, r20   // calculate end address and
-	addi r20, -7*8, r8 // loop end address; This might overflow, so we need
-	                   // to use a different test before we start the loop
-	bge/u r24, r9, tr1 // loop
+	andi r5, -8, r20   /* calculate end address and */
+	addi r20, -7*8, r8 /* loop end address; This might overflow, so we need
+	                      to use a different test before we start the loop
+	                    */
+	bge/u r24, r9, tr1 /* loop */
 	st.q r25, 8, r3
 	st.q r20, -8, r3
 	shlri r24, 1, r24
-	beqi/u r24, 1, tr0 // lastquad
+	beqi/u r24, 1, tr0 /* lastquad */
 	st.q r25, 16, r3
 	st.q r20, -16, r3
-	beqi/u r24, 2, tr0 // lastquad
+	beqi/u r24, 2, tr0 /* lastquad */
 	st.q r25, 24, r3
 	st.q r20, -24, r3
 lastquad:
@@ -73,15 +74,15 @@ lastquad:
 	blink tr2,r63

 loop:
-!!!	alloco r25, 32	// QQQ comment out for short-term fix to SHUK #3895.
-			// QQQ commenting out is locically correct, but sub-optimal
-			// QQQ Sean McGoogan - 4th April 2003.
+!!!	alloco r25, 32	/* QQQ comment out for short-term fix to SHUK #3895.
+			   QQQ commenting out is logically correct, but sub-optimal
+			   QQQ Sean McGoogan - 4th April 2003. */
 	st.q r25, 8, r3
 	st.q r25, 16, r3
 	st.q r25, 24, r3
 	st.q r25, 32, r3
 	addi r25, 32, r25
-	bgeu/l r8, r25, tr1 // loop
+	bgeu/l r8, r25, tr1 /* loop */

 	st.q r20, -40, r3
 	st.q r20, -32, r3

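The multi-line comment above ("This might overflow...") records a real hazard: the precomputed loop-end address in r8 (end minus 7*8 bytes) can wrap for short buffers, so the decision to enter the unrolled loop is made on the quadword count in r24 instead. The hazard and its fix, sketched in C (illustrative only, not part of this commit; names and the unroll factor are invented for the example):

void fill_quads (unsigned long *p, unsigned long v, unsigned long n)
{
	unsigned long *end = p + n;

	if (n >= 8) {	/* gate on the count: always safe to evaluate */
		/* only now is 'end - 7' guaranteed not to wrap */
		unsigned long *limit = end - 7;
		do {	/* several stores per iteration, as in the loop above */
			p[0] = v; p[1] = v; p[2] = v; p[3] = v;
			p += 4;
		} while (p < limit);
	}
	while (p < end)	/* at most seven quadwords remain */
		*p++ = v;
}
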
+ 14 - 14
libc/string/sh64/strcpy.S

@@ -31,7 +31,7 @@ strcpy:
 	addi r2, 8, r0
 	mcmpeq.b r4,r63,r6
 	SHHI r6,r7,r6
-	bnei/u r6,0,tr1 // shortstring
+	bnei/u r6,0,tr1 /* shortstring */
 	pta/l no_lddst, tr2
 	ori r3,-8,r23
 	sub r2, r23, r0
@@ -41,28 +41,28 @@ strcpy:
 	pta/l loop, tr0
 	ori r2,-8,r22
 	mcmpeq.b r5, r63, r6
-	bgt/u r22, r23, tr2 // no_lddst
+	bgt/u r22, r23, tr2 /* no_lddst */

-	// r22 < r23 :  Need to do a load from the destination.
-	// r22 == r23 : Doesn't actually need to load from destination,
-	//              but still can be handled here.
+	/* r22 < r23 :  Need to do a load from the destination. */
+	/* r22 == r23 : Doesn't actually need to load from destination, */
+	/*              but still can be handled here. */
 	ldlo.q r2, 0, r9
 	movi -1, r8
 	SHLO r8, r7, r8
 	mcmv r4, r8, r9
 	stlo.q r2, 0, r9
-	beqi/l r6, 0, tr0 // loop
+	beqi/l r6, 0, tr0 /* loop */

 	add r5, r63, r4
 	addi r0, 8, r0
-	blink tr1, r63 // shortstring
+	blink tr1, r63 /* shortstring */
 no_lddst:
-	// r22 > r23: note that for r22 == r23 the sthi.q would clobber
-	//            bytes before the destination region.
+	/* r22 > r23: note that for r22 == r23 the sthi.q would clobber */
+	/*            bytes before the destination region. */
 	stlo.q r2, 0, r4
 	SHHI r4, r7, r4
 	sthi.q r0, -1, r4
-	beqi/l r6, 0, tr0 // loop
+	beqi/l r6, 0, tr0 /* loop */

 	add r5, r63, r4
 	addi r0, 8, r0
@@ -77,7 +77,7 @@ shortstring2:
 	shlri r4,8,r4
 	addi r0,1,r0
 	bnei/l r5,0,tr1
-	blink tr4,r63 // return
+	blink tr4,r63 /* return */
 	
 	.balign 8
 loop:
@@ -86,16 +86,16 @@ loop:
 	addi r0, 16, r0
 	sthi.q r0, -9, r5
 	mcmpeq.b r4, r63, r6
-	bnei/u r6, 0, tr1 // shortstring
+	bnei/u r6, 0, tr1 /* shortstring */
 	ldx.q r0, r21, r5
 	stlo.q r0, -8, r4
 	sthi.q r0, -1, r4
 	mcmpeq.b r5, r63, r6
-	beqi/l r6, 0, tr0 // loop
+	beqi/l r6, 0, tr0 /* loop */

 	add r5, r63, r4
 	addi r0, 8, r0
-	blink tr1, r63 // shortstring
+	blink tr1, r63 /* shortstring */

 	.size	strcpy,.-strcpy


+ 11 - 11
libc/string/xtensa/memcpy.S

@@ -83,7 +83,7 @@ __memcpy_aux:
 	loopnez	a4, 2f
 #else
 	beqz	a4, 2f
-	add	a7, a3, a4	// a7 = end address for source
+	add	a7, a3, a4	/* a7 = end address for source */
 #endif
 1:	l8ui	a6, a3, 0
 	addi	a3, a3, 1
@@ -98,7 +98,7 @@ __memcpy_aux:
 /* Destination is unaligned.  */

 	.align	4
-.Ldst1mod2: // dst is only byte aligned
+.Ldst1mod2: /* dst is only byte aligned */

 	/* Do short copies byte-by-byte.  */
 	_bltui	a4, 7, .Lbytecopy
@@ -113,7 +113,7 @@ __memcpy_aux:
 	/* Return to main algorithm if dst is now aligned.  */
 	_bbci.l	a5, 1, .Ldstaligned

-.Ldst2mod4: // dst has 16-bit alignment
+.Ldst2mod4: /* dst has 16-bit alignment */

 	/* Do short copies byte-by-byte.  */
 	_bltui	a4, 6, .Lbytecopy
@@ -134,7 +134,7 @@ __memcpy_aux:
 ENTRY (memcpy)
 	/* a2 = dst, a3 = src, a4 = len */

-	mov	a5, a2		// copy dst so that a2 is return value
+	mov	a5, a2		/* copy dst so that a2 is return value */
 	_bbsi.l	a2, 0, .Ldst1mod2
 	_bbsi.l	a2, 1, .Ldst2mod4
 .Ldstaligned:
@@ -152,7 +152,7 @@ ENTRY (memcpy)
 #else
 	beqz	a7, 2f
 	slli	a8, a7, 4
-	add	a8, a8, a3	// a8 = end of last 16B source chunk
+	add	a8, a8, a3	/* a8 = end of last 16B source chunk */
 #endif
 1:	l32i	a6, a3, 0
 	l32i	a7, a3, 4
@@ -218,18 +218,18 @@ ENTRY (memcpy)

 	/* Copy 16 bytes per iteration for word-aligned dst and
 	   unaligned src.  */
-	ssa8	a3		// set shift amount from byte offset
+	ssa8	a3		/* set shift amount from byte offset */
 #if UNALIGNED_ADDRESSES_CHECKED
-	and	a11, a3, a8	// save unalignment offset for below
-	sub	a3, a3, a11	// align a3
+	and	a11, a3, a8	/* save unalignment offset for below */
+	sub	a3, a3, a11	/* align a3 */
 #endif
-	l32i	a6, a3, 0	// load first word
+	l32i	a6, a3, 0	/* load first word */
 #if XCHAL_HAVE_LOOPS
 	loopnez	a7, 2f
 #else
 	beqz	a7, 2f
 	slli	a10, a7, 4
-	add	a10, a10, a3	// a10 = end of last 16B source chunk
+	add	a10, a10, a3	/* a10 = end of last 16B source chunk */
 #endif
 1:	l32i	a7, a3, 4
 	l32i	a8, a3, 8
@@ -273,7 +273,7 @@ ENTRY (memcpy)
 	mov	a6, a7
 4:
 #if UNALIGNED_ADDRESSES_CHECKED
-	add	a3, a3, a11	// readjust a3 with correct misalignment
+	add	a3, a3, a11	/* readjust a3 with correct misalignment */
 #endif
 	bbsi.l	a4, 1, 5f
 	bbsi.l	a4, 0, 6f

+ 6 - 6
libc/string/xtensa/memset.S

@@ -29,7 +29,7 @@
    The algorithm is as follows:

    Create a word with c in all byte positions.
-	
+
    If the destination is aligned, set 16B chunks with a loop, and then
    finish up with 8B, 4B, 2B, and 1B stores conditional on the length.

@@ -57,7 +57,7 @@ __memset_aux:
 	loopnez	a4, 2f
 #else
 	beqz	a4, 2f
-	add	a6, a5, a4	// a6 = ending address
+	add	a6, a5, a4	/* a6 = ending address */
 #endif
 1:	s8i	a3, a5, 0
 	addi	a5, a5, 1
@@ -71,7 +71,7 @@ __memset_aux:

 	.align	4

-.Ldst1mod2: // dst is only byte aligned
+.Ldst1mod2: /* dst is only byte aligned */

 	/* Do short sizes byte-by-byte.  */
 	bltui	a4, 8, .Lbyteset
@@ -84,7 +84,7 @@ __memset_aux:
 	/* Now retest if dst is aligned.  */
 	_bbci.l	a5, 1, .Ldstaligned

-.Ldst2mod4: // dst has 16-bit alignment
+.Ldst2mod4: /* dst has 16-bit alignment */

 	/* Do short sizes byte-by-byte.  */
 	bltui	a4, 8, .Lbyteset
@@ -108,7 +108,7 @@ ENTRY (memset)
 	slli	a7, a3, 16
 	or	a3, a3, a7

-	mov	a5, a2		// copy dst so that a2 is return value
+	mov	a5, a2		/* copy dst so that a2 is return value */

 	/* Check if dst is unaligned.  */
 	_bbsi.l	a2, 0, .Ldst1mod2
@@ -124,7 +124,7 @@ ENTRY (memset)
 #else
 	beqz	a7, 2f
 	slli	a6, a7, 4
-	add	a6, a6, a5	// a6 = end of last 16B chunk
+	add	a6, a6, a5	/* a6 = end of last 16B chunk */
 #endif
 	/* Set 16 bytes per iteration.  */
 1:	s32i	a3, a5, 0

+ 74 - 74
libc/string/xtensa/strcmp.S

@@ -45,35 +45,35 @@
 ENTRY (strcmp)
 	/* a2 = s1, a3 = s2 */
 
-	l8ui	a8, a2, 0	// byte 0 from s1
-	l8ui	a9, a3, 0	// byte 0 from s2
-	movi	a10, 3		// mask
+	l8ui	a8, a2, 0	/* byte 0 from s1 */
+	l8ui	a9, a3, 0	/* byte 0 from s2 */
+	movi	a10, 3		/* mask */
 	bne	a8, a9, .Lretdiff
 
 	or	a11, a2, a3
 	bnone	a11, a10, .Laligned
 
-	xor	a11, a2, a3	// compare low two bits of s1 and s2
-	bany	a11, a10, .Lunaligned	// if they have different alignment
+	xor	a11, a2, a3	/* compare low two bits of s1 and s2 */
+	bany	a11, a10, .Lunaligned	/* if they have different alignment */
 
 	/* s1/s2 are not word-aligned.  */
-	addi	a2, a2, 1	// advance s1
-	beqz	a8, .Leq	// bytes equal, if zero, strings are equal
-	addi	a3, a3, 1	// advance s2
-	bnone	a2, a10, .Laligned // if s1/s2 now aligned
-	l8ui	a8, a2, 0	// byte 1 from s1
-	l8ui	a9, a3, 0	// byte 1 from s2
-	addi	a2, a2, 1	// advance s1
-	bne	a8, a9, .Lretdiff // if different, return difference
-	beqz	a8, .Leq	// bytes equal, if zero, strings are equal
-	addi	a3, a3, 1	// advance s2
-	bnone	a2, a10, .Laligned // if s1/s2 now aligned
-	l8ui	a8, a2, 0	// byte 2 from s1
-	l8ui	a9, a3, 0	// byte 2 from s2
-	addi	a2, a2, 1	// advance s1
-	bne	a8, a9, .Lretdiff // if different, return difference
-	beqz	a8, .Leq	// bytes equal, if zero, strings are equal
-	addi	a3, a3, 1	// advance s2
+	addi	a2, a2, 1	/* advance s1 */
+	beqz	a8, .Leq	/* bytes equal, if zero, strings are equal */
+	addi	a3, a3, 1	/* advance s2 */
+	bnone	a2, a10, .Laligned /* if s1/s2 now aligned */
+	l8ui	a8, a2, 0	/* byte 1 from s1 */
+	l8ui	a9, a3, 0	/* byte 1 from s2 */
+	addi	a2, a2, 1	/* advance s1 */
+	bne	a8, a9, .Lretdiff /* if different, return difference */
+	beqz	a8, .Leq	/* bytes equal, if zero, strings are equal */
+	addi	a3, a3, 1	/* advance s2 */
+	bnone	a2, a10, .Laligned /* if s1/s2 now aligned */
+	l8ui	a8, a2, 0	/* byte 2 from s1 */
+	l8ui	a9, a3, 0	/* byte 2 from s2 */
+	addi	a2, a2, 1	/* advance s1 */
+	bne	a8, a9, .Lretdiff /* if different, return difference */
+	beqz	a8, .Leq	/* bytes equal, if zero, strings are equal */
+	addi	a3, a3, 1	/* advance s2 */
 	j	.Laligned
 
 /* s1 and s2 have different alignment.
@@ -92,8 +92,8 @@ ENTRY (strcmp)
 	/* (2 mod 4) alignment for loop instruction */
 .Lunaligned:
 #if XCHAL_HAVE_LOOPS
-	_movi.n	a8, 0		// set up for the maximum loop count
-	loop	a8, .Lretdiff	// loop forever (almost anyway)
+	_movi.n	a8, 0		/* set up for the maximum loop count */
+	loop	a8, .Lretdiff	/* loop forever (almost anyway) */
 #endif
 .Lnextbyte:
 	l8ui	a8, a2, 0
@@ -131,32 +131,32 @@ ENTRY (strcmp)
 #if XCHAL_HAVE_LOOPS
 .Laligned:
 	.begin	no-transform
-	l32r	a4, .Lmask0	// mask for byte 0
+	l32r	a4, .Lmask0	/* mask for byte 0 */
 	l32r	a7, .Lmask4
 	/* Loop forever.  (a4 is more than than the maximum number
 	   of iterations) */
 	loop	a4, .Laligned_done
 
 	/* First unrolled loop body.  */
-	l32i	a8, a2, 0	// get word from s1
-	l32i	a9, a3, 0	// get word from s2
+	l32i	a8, a2, 0	/* get word from s1 */
+	l32i	a9, a3, 0	/* get word from s2 */
 	slli	a5, a8, 1
 	bne	a8, a9, .Lwne2
 	or	a9, a8, a5
 	bnall	a9, a7, .Lprobeq
 
 	/* Second unrolled loop body.  */
-	l32i	a8, a2, 4	// get word from s1+4
-	l32i	a9, a3, 4	// get word from s2+4
+	l32i	a8, a2, 4	/* get word from s1+4 */
+	l32i	a9, a3, 4	/* get word from s2+4 */
 	slli	a5, a8, 1
 	bne	a8, a9, .Lwne2
 	or	a9, a8, a5
 	bnall	a9, a7, .Lprobeq2
 
-	addi	a2, a2, 8	// advance s1 pointer
-	addi	a3, a3, 8	// advance s2 pointer
+	addi	a2, a2, 8	/* advance s1 pointer */
+	addi	a3, a3, 8	/* advance s2 pointer */
 .Laligned_done:
-	or	a1, a1, a1	// nop
+	or	a1, a1, a1	/* nop */
 
 .Lprobeq2:
 	/* Adjust pointers to account for the loop unrolling.  */
@@ -166,15 +166,15 @@ ENTRY (strcmp)
 #else /* !XCHAL_HAVE_LOOPS */
 
 .Laligned:
-	movi	a4, MASK0	// mask for byte 0
+	movi	a4, MASK0	/* mask for byte 0 */
 	movi	a7, MASK4
 	j	.Lfirstword
 .Lnextword:
-	addi	a2, a2, 4	// advance s1 pointer
-	addi	a3, a3, 4	// advance s2 pointer
+	addi	a2, a2, 4	/* advance s1 pointer */
+	addi	a3, a3, 4	/* advance s2 pointer */
 .Lfirstword:
-	l32i	a8, a2, 0	// get word from s1
-	l32i	a9, a3, 0	// get word from s2
+	l32i	a8, a2, 0	/* get word from s1 */
+	l32i	a9, a3, 0	/* get word from s2 */
 	slli	a5, a8, 1
 	bne	a8, a9, .Lwne2
 	or	a9, a8, a5
@@ -186,49 +186,49 @@ ENTRY (strcmp)
 	/* Words are probably equal, but check for sure.
 	   If not, loop over the rest of string using normal algorithm.  */
 
-	bnone	a8, a4, .Leq	// if byte 0 is zero
-	l32r	a5, .Lmask1	// mask for byte 1
-	l32r	a6, .Lmask2	// mask for byte 2
-	bnone	a8, a5, .Leq	// if byte 1 is zero
-	l32r	a7, .Lmask3	// mask for byte 3
-	bnone	a8, a6, .Leq	// if byte 2 is zero
-	bnone	a8, a7, .Leq	// if byte 3 is zero
-	addi.n	a2, a2, 4	// advance s1 pointer
-	addi.n	a3, a3, 4	// advance s2 pointer
+	bnone	a8, a4, .Leq	/* if byte 0 is zero */
+	l32r	a5, .Lmask1	/* mask for byte 1 */
+	l32r	a6, .Lmask2	/* mask for byte 2 */
+	bnone	a8, a5, .Leq	/* if byte 1 is zero */
+	l32r	a7, .Lmask3	/* mask for byte 3 */
+	bnone	a8, a6, .Leq	/* if byte 2 is zero */
+	bnone	a8, a7, .Leq	/* if byte 3 is zero */
+	addi.n	a2, a2, 4	/* advance s1 pointer */
+	addi.n	a3, a3, 4	/* advance s2 pointer */
 #if XCHAL_HAVE_LOOPS
 
 	/* align (1 mod 4) */
-	loop	a4, .Leq	// loop forever (a4 is bigger than max iters)
+	loop	a4, .Leq	/* loop forever (a4 is bigger than max iters) */
 	.end	no-transform
 
-	l32i	a8, a2, 0	// get word from s1
-	l32i	a9, a3, 0	// get word from s2
-	addi	a2, a2, 4	// advance s1 pointer
+	l32i	a8, a2, 0	/* get word from s1 */
+	l32i	a9, a3, 0	/* get word from s2 */
+	addi	a2, a2, 4	/* advance s1 pointer */
 	bne	a8, a9, .Lwne
-	bnone	a8, a4, .Leq	// if byte 0 is zero
-	bnone	a8, a5, .Leq	// if byte 1 is zero
-	bnone	a8, a6, .Leq	// if byte 2 is zero
-	bnone	a8, a7, .Leq	// if byte 3 is zero
-	addi	a3, a3, 4	// advance s2 pointer
+	bnone	a8, a4, .Leq	/* if byte 0 is zero */
+	bnone	a8, a5, .Leq	/* if byte 1 is zero */
+	bnone	a8, a6, .Leq	/* if byte 2 is zero */
+	bnone	a8, a7, .Leq	/* if byte 3 is zero */
+	addi	a3, a3, 4	/* advance s2 pointer */
 
 #else /* !XCHAL_HAVE_LOOPS */
 
 	j	.Lfirstword2
 .Lnextword2:
-	addi	a3, a3, 4	// advance s2 pointer
+	addi	a3, a3, 4	/* advance s2 pointer */
 .Lfirstword2:
-	l32i	a8, a2, 0	// get word from s1
-	l32i	a9, a3, 0	// get word from s2
-	addi	a2, a2, 4	// advance s1 pointer
+	l32i	a8, a2, 0	/* get word from s1 */
+	l32i	a9, a3, 0	/* get word from s2 */
+	addi	a2, a2, 4	/* advance s1 pointer */
 	bne	a8, a9, .Lwne
-	bnone	a8, a4, .Leq	// if byte 0 is zero
-	bnone	a8, a5, .Leq	// if byte 1 is zero
-	bnone	a8, a6, .Leq	// if byte 2 is zero
-	bany	a8, a7, .Lnextword2	// if byte 3 is zero
+	bnone	a8, a4, .Leq	/* if byte 0 is zero */
+	bnone	a8, a5, .Leq	/* if byte 1 is zero */
+	bnone	a8, a6, .Leq	/* if byte 2 is zero */
+	bany	a8, a7, .Lnextword2	/* if byte 3 is zero */
 #endif /* !XCHAL_HAVE_LOOPS */
 
 	/* Words are equal; some byte is zero.  */
-.Leq:	movi	a2, 0		// return equal
+.Leq:	movi	a2, 0		/* return equal */
 	retw
 
 .Lwne2:	/* Words are not equal.  On big-endian processors, if none of the
@@ -243,18 +243,18 @@ ENTRY (strcmp)
 .Lposreturn:
 	movi	a2, 1
 	retw
-.Lsomezero:	// There is probably some zero byte.
+.Lsomezero:	/* There is probably some zero byte. */
 #endif /* __XTENSA_EB__ */
 .Lwne:	/* Words are not equal.  */
-	xor	a2, a8, a9	// get word with nonzero in byte that differs
-	bany	a2, a4, .Ldiff0	// if byte 0 differs
-	movi	a5, MASK1	// mask for byte 1
-	bnone	a8, a4, .Leq	// if byte 0 is zero
-	bany	a2, a5, .Ldiff1	// if byte 1 differs
-	movi	a6, MASK2	// mask for byte 2
-	bnone	a8, a5, .Leq	// if byte 1 is zero
-	bany	a2, a6, .Ldiff2	// if byte 2 differs
-	bnone	a8, a6, .Leq	// if byte 2 is zero
+	xor	a2, a8, a9	/* get word with nonzero in byte that differs */
+	bany	a2, a4, .Ldiff0	/* if byte 0 differs */
+	movi	a5, MASK1	/* mask for byte 1 */
+	bnone	a8, a4, .Leq	/* if byte 0 is zero */
+	bany	a2, a5, .Ldiff1	/* if byte 1 differs */
+	movi	a6, MASK2	/* mask for byte 2 */
+	bnone	a8, a5, .Leq	/* if byte 1 is zero */
+	bany	a2, a6, .Ldiff2	/* if byte 2 differs */
+	bnone	a8, a6, .Leq	/* if byte 2 is zero */
 #ifdef __XTENSA_EB__
 .Ldiff3:
 .Ldiff2:

+ 36 - 36
libc/string/xtensa/strcpy.S

@@ -36,7 +36,7 @@
 ENTRY (strcpy)
 	/* a2 = dst, a3 = src */
 
-	mov	a10, a2		// leave dst in return value register
+	mov	a10, a2		/* leave dst in return value register */
 	movi	a4, MASK0
 	movi	a5, MASK1
 	movi	a6, MASK2
@@ -51,23 +51,23 @@ ENTRY (strcpy)
 
 	j	.Ldstunaligned
 
-.Lsrc1mod2: // src address is odd
-	l8ui	a8, a3, 0	// get byte 0
-	addi	a3, a3, 1	// advance src pointer
-	s8i	a8, a10, 0	// store byte 0
-	beqz	a8, 1f		// if byte 0 is zero
-	addi	a10, a10, 1	// advance dst pointer
-	bbci.l	a3, 1, .Lsrcaligned // if src is now word-aligned
+.Lsrc1mod2: /* src address is odd */
+	l8ui	a8, a3, 0	/* get byte 0 */
+	addi	a3, a3, 1	/* advance src pointer */
+	s8i	a8, a10, 0	/* store byte 0 */
+	beqz	a8, 1f		/* if byte 0 is zero */
+	addi	a10, a10, 1	/* advance dst pointer */
+	bbci.l	a3, 1, .Lsrcaligned /* if src is now word-aligned */
 
-.Lsrc2mod4: // src address is 2 mod 4
-	l8ui	a8, a3, 0	// get byte 0
+.Lsrc2mod4: /* src address is 2 mod 4 */
+	l8ui	a8, a3, 0	/* get byte 0 */
 	/* 1-cycle interlock */
-	s8i	a8, a10, 0	// store byte 0
-	beqz	a8, 1f		// if byte 0 is zero
-	l8ui	a8, a3, 1	// get byte 0
-	addi	a3, a3, 2	// advance src pointer
-	s8i	a8, a10, 1	// store byte 0
-	addi	a10, a10, 2	// advance dst pointer
+	s8i	a8, a10, 0	/* store byte 0 */
+	beqz	a8, 1f		/* if byte 0 is zero */
+	l8ui	a8, a3, 1	/* get byte 0 */
+	addi	a3, a3, 2	/* advance src pointer */
+	s8i	a8, a10, 1	/* store byte 0 */
+	addi	a10, a10, 2	/* advance dst pointer */
 	bnez	a8, .Lsrcaligned
 1:	retw
 
@@ -78,28 +78,28 @@ ENTRY (strcpy)
 #if XCHAL_HAVE_LOOPS
 	/* (2 mod 4) alignment for loop instruction */
 .Laligned:
-	_movi.n	a8, 0		// set up for the maximum loop count
-	loop	a8, .Lz3	// loop forever (almost anyway)
-	l32i	a8, a3, 0	// get word from src
-	addi	a3, a3, 4	// advance src pointer
-	bnone	a8, a4, .Lz0	// if byte 0 is zero
-	bnone	a8, a5, .Lz1	// if byte 1 is zero
-	bnone	a8, a6, .Lz2	// if byte 2 is zero
-	s32i	a8, a10, 0	// store word to dst
-	bnone	a8, a7, .Lz3	// if byte 3 is zero
-	addi	a10, a10, 4	// advance dst pointer
+	_movi.n	a8, 0		/* set up for the maximum loop count */
+	loop	a8, .Lz3	/* loop forever (almost anyway) */
+	l32i	a8, a3, 0	/* get word from src */
+	addi	a3, a3, 4	/* advance src pointer */
+	bnone	a8, a4, .Lz0	/* if byte 0 is zero */
+	bnone	a8, a5, .Lz1	/* if byte 1 is zero */
+	bnone	a8, a6, .Lz2	/* if byte 2 is zero */
+	s32i	a8, a10, 0	/* store word to dst */
+	bnone	a8, a7, .Lz3	/* if byte 3 is zero */
+	addi	a10, a10, 4	/* advance dst pointer */
 
 #else /* !XCHAL_HAVE_LOOPS */
 
-1:	addi	a10, a10, 4	// advance dst pointer
+1:	addi	a10, a10, 4	/* advance dst pointer */
 .Laligned:
-	l32i	a8, a3, 0	// get word from src
-	addi	a3, a3, 4	// advance src pointer
-	bnone	a8, a4, .Lz0	// if byte 0 is zero
-	bnone	a8, a5, .Lz1	// if byte 1 is zero
-	bnone	a8, a6, .Lz2	// if byte 2 is zero
-	s32i	a8, a10, 0	// store word to dst
-	bany	a8, a7, 1b	// if byte 3 is zero
+	l32i	a8, a3, 0	/* get word from src */
+	addi	a3, a3, 4	/* advance src pointer */
+	bnone	a8, a4, .Lz0	/* if byte 0 is zero */
+	bnone	a8, a5, .Lz1	/* if byte 1 is zero */
+	bnone	a8, a6, .Lz2	/* if byte 2 is zero */
+	s32i	a8, a10, 0	/* store word to dst */
+	bany	a8, a7, 1b	/* if byte 3 is zero */
 #endif /* !XCHAL_HAVE_LOOPS */
 
 .Lz3:	/* Byte 3 is zero.  */
@@ -133,8 +133,8 @@ ENTRY (strcpy)
 .Ldstunaligned:
 
 #if XCHAL_HAVE_LOOPS
-	_movi.n	a8, 0		// set up for the maximum loop count
-	loop	a8, 2f		// loop forever (almost anyway)
+	_movi.n	a8, 0		/* set up for the maximum loop count */
+	loop	a8, 2f		/* loop forever (almost anyway) */
 #endif
 1:	l8ui	a8, a3, 0
 	addi	a3, a3, 1

+ 28 - 28
libc/string/xtensa/strlen.S

@@ -36,7 +36,7 @@
 ENTRY (strlen)
 	/* a2 = s */
 
-	addi	a3, a2, -4	// because we overincrement at the end
+	addi	a3, a2, -4	/* because we overincrement at the end */
 	movi	a4, MASK0
 	movi	a5, MASK1
 	movi	a6, MASK2
@@ -45,21 +45,21 @@ ENTRY (strlen)
 	bbsi.l	a2, 1, .L2mod4
 	j	.Laligned
 
-.L1mod2: // address is odd
-	l8ui	a8, a3, 4	// get byte 0
-	addi	a3, a3, 1	// advance string pointer
-	beqz	a8, .Lz3	// if byte 0 is zero
-	bbci.l	a3, 1, .Laligned // if string pointer is now word-aligned
+.L1mod2: /* address is odd */
+	l8ui	a8, a3, 4	/* get byte 0 */
+	addi	a3, a3, 1	/* advance string pointer */
+	beqz	a8, .Lz3	/* if byte 0 is zero */
+	bbci.l	a3, 1, .Laligned /* if string pointer is now word-aligned */
 
-.L2mod4: // address is 2 mod 4
-	addi	a3, a3, 2	// advance ptr for aligned access
-	l32i	a8, a3, 0	// get word with first two bytes of string
-	bnone	a8, a6, .Lz2	// if byte 2 (of word, not string) is zero
-	bany	a8, a7, .Laligned // if byte 3 (of word, not string) is nonzero
+.L2mod4: /* address is 2 mod 4 */
+	addi	a3, a3, 2	/* advance ptr for aligned access */
+	l32i	a8, a3, 0	/* get word with first two bytes of string */
+	bnone	a8, a6, .Lz2	/* if byte 2 (of word, not string) is zero */
+	bany	a8, a7, .Laligned /* if byte 3 (of word, not string) is nonzero */
 
 	/* Byte 3 is zero.  */
-	addi	a3, a3, 3	// point to zero byte
-	sub	a2, a3, a2	// subtract to get length
+	addi	a3, a3, 3	/* point to zero byte */
+	sub	a2, a3, a2	/* subtract to get length */
 	retw
 
 
@@ -69,36 +69,36 @@ ENTRY (strlen)
 	/* (2 mod 4) alignment for loop instruction */
 .Laligned:
 #if XCHAL_HAVE_LOOPS
-	_movi.n	a8, 0		// set up for the maximum loop count
-	loop	a8, .Lz3	// loop forever (almost anyway)
+	_movi.n	a8, 0		/* set up for the maximum loop count */
+	loop	a8, .Lz3	/* loop forever (almost anyway) */
 #endif
-1:	l32i	a8, a3, 4	// get next word of string
-	addi	a3, a3, 4	// advance string pointer
-	bnone	a8, a4, .Lz0	// if byte 0 is zero
-	bnone	a8, a5, .Lz1	// if byte 1 is zero
-	bnone	a8, a6, .Lz2	// if byte 2 is zero
+1:	l32i	a8, a3, 4	/* get next word of string */
+	addi	a3, a3, 4	/* advance string pointer */
+	bnone	a8, a4, .Lz0	/* if byte 0 is zero */
+	bnone	a8, a5, .Lz1	/* if byte 1 is zero */
+	bnone	a8, a6, .Lz2	/* if byte 2 is zero */
 #if XCHAL_HAVE_LOOPS
-	bnone	a8, a7, .Lz3	// if byte 3 is zero
+	bnone	a8, a7, .Lz3	/* if byte 3 is zero */
 #else
-	bany	a8, a7, 1b	// repeat if byte 3 is non-zero
+	bany	a8, a7, 1b	/* repeat if byte 3 is non-zero */
 #endif
 
 .Lz3:	/* Byte 3 is zero.  */
-	addi	a3, a3, 3	// point to zero byte
+	addi	a3, a3, 3	/* point to zero byte */
 	/* Fall through....  */
 
 .Lz0:	/* Byte 0 is zero.  */
-	sub	a2, a3, a2	// subtract to get length
+	sub	a2, a3, a2	/* subtract to get length */
 	retw
 
 .Lz1:	/* Byte 1 is zero.  */
-	addi	a3, a3, 1	// point to zero byte
-	sub	a2, a3, a2	// subtract to get length
+	addi	a3, a3, 1	/* point to zero byte */
+	sub	a2, a3, a2	/* subtract to get length */
 	retw
 
 .Lz2:	/* Byte 2 is zero.  */
-	addi	a3, a3, 2	// point to zero byte
-	sub	a2, a3, a2	// subtract to get length
+	addi	a3, a3, 2	/* point to zero byte */
+	sub	a2, a3, a2	/* subtract to get length */
 	retw
 
 libc_hidden_def (strlen)

+ 75 - 75
libc/string/xtensa/strncpy.S

@@ -41,29 +41,29 @@
 	.literal_position
 __strncpy_aux:
 
-.Lsrc1mod2: // src address is odd
-	l8ui	a8, a3, 0	// get byte 0
-	addi	a3, a3, 1	// advance src pointer
-	s8i	a8, a10, 0	// store byte 0
-	addi	a4, a4, -1	// decrement n
-	beqz    a4, .Lret       // if n is zero
-	addi	a10, a10, 1	// advance dst pointer
-	beqz	a8, .Lfill	// if byte 0 is zero
-	bbci.l	a3, 1, .Lsrcaligned // if src is now word-aligned
-
-.Lsrc2mod4: // src address is 2 mod 4
-	l8ui	a8, a3, 0	// get byte 0
-	addi	a4, a4, -1	// decrement n
-	s8i	a8, a10, 0	// store byte 0
-	beqz    a4, .Lret       // if n is zero
-	addi	a10, a10, 1	// advance dst pointer
-	beqz	a8, .Lfill	// if byte 0 is zero
-	l8ui	a8, a3, 1	// get byte 0
-	addi	a3, a3, 2	// advance src pointer
-	s8i	a8, a10, 0	// store byte 0
-	addi	a4, a4, -1	// decrement n
-	beqz    a4, .Lret       // if n is zero
-	addi	a10, a10, 1	// advance dst pointer
+.Lsrc1mod2: /* src address is odd */
+	l8ui	a8, a3, 0	/* get byte 0 */
+	addi	a3, a3, 1	/* advance src pointer */
+	s8i	a8, a10, 0	/* store byte 0 */
+	addi	a4, a4, -1	/* decrement n */
+	beqz    a4, .Lret       /* if n is zero */
+	addi	a10, a10, 1	/* advance dst pointer */
+	beqz	a8, .Lfill	/* if byte 0 is zero */
+	bbci.l	a3, 1, .Lsrcaligned /* if src is now word-aligned */
+
+.Lsrc2mod4: /* src address is 2 mod 4 */
+	l8ui	a8, a3, 0	/* get byte 0 */
+	addi	a4, a4, -1	/* decrement n */
+	s8i	a8, a10, 0	/* store byte 0 */
+	beqz    a4, .Lret       /* if n is zero */
+	addi	a10, a10, 1	/* advance dst pointer */
+	beqz	a8, .Lfill	/* if byte 0 is zero */
+	l8ui	a8, a3, 1	/* get byte 0 */
+	addi	a3, a3, 2	/* advance src pointer */
+	s8i	a8, a10, 0	/* store byte 0 */
+	addi	a4, a4, -1	/* decrement n */
+	beqz    a4, .Lret       /* if n is zero */
+	addi	a10, a10, 1	/* advance dst pointer */
 	bnez	a8, .Lsrcaligned
 	j	.Lfill
 
@@ -74,8 +74,8 @@ __strncpy_aux:
 ENTRY (strncpy)
 	/* a2 = dst, a3 = src */
 
-	mov	a10, a2		// leave dst in return value register
-	beqz    a4, .Lret       // if n is zero
+	mov	a10, a2		/* leave dst in return value register */
+	beqz    a4, .Lret       /* if n is zero */
 
 	movi	a11, MASK0
 	movi	a5, MASK1
@@ -125,28 +125,28 @@ ENTRY (strncpy)
 
 .Lfillcleanup:
 	/* Fill leftover (1 to 3) bytes with zero.  */
-	s8i	a9, a10, 0	// store byte 0
-	addi	a4, a4, -1	// decrement n
+	s8i	a9, a10, 0	/* store byte 0 */
+	addi	a4, a4, -1	/* decrement n */
 	addi	a10, a10, 1
-	bnez    a4, .Lfillcleanup 
+	bnez    a4, .Lfillcleanup
 
 2:	retw
-	
-.Lfill1mod2: // dst address is odd
-	s8i	a9, a10, 0	// store byte 0
-	addi	a4, a4, -1	// decrement n
-	beqz    a4, 2b		// if n is zero
-	addi    a10, a10, 1	// advance dst pointer
-	bbci.l	a10, 1, .Lfillaligned // if dst is now word-aligned
-
-.Lfill2mod4: // dst address is 2 mod 4
-	s8i	a9, a10, 0	// store byte 0
-	addi	a4, a4, -1	// decrement n
-	beqz    a4, 2b		// if n is zero
-	s8i	a9, a10, 1	// store byte 1
-	addi	a4, a4, -1	// decrement n
-	beqz    a4, 2b		// if n is zero
-	addi    a10, a10, 2	// advance dst pointer
+
+.Lfill1mod2: /* dst address is odd */
+	s8i	a9, a10, 0	/* store byte 0 */
+	addi	a4, a4, -1	/* decrement n */
+	beqz    a4, 2b		/* if n is zero */
+	addi    a10, a10, 1	/* advance dst pointer */
+	bbci.l	a10, 1, .Lfillaligned /* if dst is now word-aligned */
+
+.Lfill2mod4: /* dst address is 2 mod 4 */
+	s8i	a9, a10, 0	/* store byte 0 */
+	addi	a4, a4, -1	/* decrement n */
+	beqz    a4, 2b		/* if n is zero */
+	s8i	a9, a10, 1	/* store byte 1 */
+	addi	a4, a4, -1	/* decrement n */
+	beqz    a4, 2b		/* if n is zero */
+	addi    a10, a10, 2	/* advance dst pointer */
 	j	.Lfillaligned
 
 
@@ -156,32 +156,32 @@ ENTRY (strncpy)
 	/* (2 mod 4) alignment for loop instruction */
 .Laligned:
 #if XCHAL_HAVE_LOOPS
-	_movi.n	a8, 0		// set up for the maximum loop count
-	loop	a8, 1f		// loop forever (almost anyway)
-	blti	a4, 5, .Ldstunaligned // n is near limit; do one at a time
-	l32i	a8, a3, 0	// get word from src
-	addi	a3, a3, 4	// advance src pointer
-	bnone	a8, a11, .Lz0	// if byte 0 is zero
-	bnone	a8, a5, .Lz1	// if byte 1 is zero
-	bnone	a8, a6, .Lz2	// if byte 2 is zero
-	s32i	a8, a10, 0	// store word to dst
-	addi	a4, a4, -4	// decrement n
-	addi	a10, a10, 4	// advance dst pointer
-	bnone	a8, a7, .Lfill	// if byte 3 is zero
-1:	
+	_movi.n	a8, 0		/* set up for the maximum loop count */
+	loop	a8, 1f		/* loop forever (almost anyway) */
+	blti	a4, 5, .Ldstunaligned /* n is near limit; do one at a time */
+	l32i	a8, a3, 0	/* get word from src */
+	addi	a3, a3, 4	/* advance src pointer */
+	bnone	a8, a11, .Lz0	/* if byte 0 is zero */
+	bnone	a8, a5, .Lz1	/* if byte 1 is zero */
+	bnone	a8, a6, .Lz2	/* if byte 2 is zero */
+	s32i	a8, a10, 0	/* store word to dst */
+	addi	a4, a4, -4	/* decrement n */
+	addi	a10, a10, 4	/* advance dst pointer */
+	bnone	a8, a7, .Lfill	/* if byte 3 is zero */
+1:
 
 #else /* !XCHAL_HAVE_LOOPS */
 
-1:	blti	a4, 5, .Ldstunaligned // n is near limit; do one at a time
-	l32i	a8, a3, 0	// get word from src
-	addi	a3, a3, 4	// advance src pointer
-	bnone	a8, a11, .Lz0	// if byte 0 is zero
-	bnone	a8, a5, .Lz1	// if byte 1 is zero
-	bnone	a8, a6, .Lz2	// if byte 2 is zero
-	s32i	a8, a10, 0	// store word to dst
-	addi	a4, a4, -4	// decrement n
-	addi	a10, a10, 4	// advance dst pointer
-	bany	a8, a7, 1b	// no zeroes
+1:	blti	a4, 5, .Ldstunaligned /* n is near limit; do one at a time */
+	l32i	a8, a3, 0	/* get word from src */
+	addi	a3, a3, 4	/* advance src pointer */
+	bnone	a8, a11, .Lz0	/* if byte 0 is zero */
+	bnone	a8, a5, .Lz1	/* if byte 1 is zero */
+	bnone	a8, a6, .Lz2	/* if byte 2 is zero */
+	s32i	a8, a10, 0	/* store word to dst */
+	addi	a4, a4, -4	/* decrement n */
+	addi	a10, a10, 4	/* advance dst pointer */
+	bany	a8, a7, 1b	/* no zeroes */
 #endif /* !XCHAL_HAVE_LOOPS */
 
 	j	.Lfill
@@ -191,8 +191,8 @@ ENTRY (strncpy)
 	movi	a8, 0
 #endif
 	s8i	a8, a10, 0
-	addi	a4, a4, -1	// decrement n
-	addi	a10, a10, 1	// advance dst pointer
+	addi	a4, a4, -1	/* decrement n */
+	addi	a10, a10, 1	/* advance dst pointer */
 	j	.Lfill
 
 .Lz1:	/* Byte 1 is zero.  */
@@ -200,8 +200,8 @@ ENTRY (strncpy)
         extui   a8, a8, 16, 16
 #endif
 	s16i	a8, a10, 0
-	addi	a4, a4, -2	// decrement n
-	addi	a10, a10, 2	// advance dst pointer
+	addi	a4, a4, -2	/* decrement n */
+	addi	a10, a10, 2	/* advance dst pointer */
 	j	.Lfill
 
 .Lz2:	/* Byte 2 is zero.  */
@@ -211,8 +211,8 @@ ENTRY (strncpy)
 	s16i	a8, a10, 0
 	movi	a8, 0
 	s8i	a8, a10, 2
-	addi	a4, a4, -3	// decrement n
-	addi	a10, a10, 3	// advance dst pointer
+	addi	a4, a4, -3	/* decrement n */
+	addi	a10, a10, 3	/* advance dst pointer */
 	j	.Lfill
 
 	.align	4
@@ -220,8 +220,8 @@ ENTRY (strncpy)
 .Ldstunaligned:
 
 #if XCHAL_HAVE_LOOPS
-	_movi.n	a8, 0		// set up for the maximum loop count
-	loop	a8, 2f		// loop forever (almost anyway)
+	_movi.n	a8, 0		/* set up for the maximum loop count */
+	loop	a8, 2f		/* loop forever (almost anyway) */
 #endif
 1:	l8ui	a8, a3, 0
 	addi	a3, a3, 1

+ 13 - 13
libc/sysdeps/linux/bfin/__longjmp.S

@@ -15,7 +15,7 @@
 ___longjmp:
 	P0 = R0;
 	R0 = [P0 + 0x00];
-	[--SP] = R0;		// Put P0 on the stack
+	[--SP] = R0;		/* Put P0 on the stack */
 
 	P1 = [P0 + 0x04];
 	P2 = [P0 + 0x08];
@@ -24,12 +24,12 @@ ___longjmp:
 	P5 = [P0 + 0x14];
 
 	FP = [P0 + 0x18];
-	R0 = [SP++];		// Grab P0 from old stack
-	SP = [P0 + 0x1C];	// Update Stack Pointer
-	[--SP] = R0;		// Put P0 on new stack
-	[--SP] = R1;		// Put VAL arg on new stack
+	R0 = [SP++];		/* Grab P0 from old stack */
+	SP = [P0 + 0x1C];	/* Update Stack Pointer */
+	[--SP] = R0;		/* Put P0 on new stack */
+	[--SP] = R1;		/* Put VAL arg on new stack */
 
-	R0 = [P0 + 0x20];	// Data Registers
+	R0 = [P0 + 0x20];	/* Data Registers */
 	R1 = [P0 + 0x24];
 	R2 = [P0 + 0x28];
 	R3 = [P0 + 0x2C];
@@ -41,12 +41,12 @@ ___longjmp:
 	R0 = [P0 + 0x40];
 	ASTAT = R0;
 
-	R0 = [P0 + 0x44];	// Loop Counters
+	R0 = [P0 + 0x44];	/* Loop Counters */
 	LC0 = R0;
 	R0 = [P0 + 0x48];
 	LC1 = R0;
 
-	R0 = [P0 + 0x4C];	// Accumulators
+	R0 = [P0 + 0x4C];	/* Accumulators */
 	A0.W = R0;
 	R0 = [P0 + 0x50];
 	A0.X = R0;
@@ -55,7 +55,7 @@ ___longjmp:
 	R0 = [P0 + 0x58];
 	A1.X = R0;
 
-	R0 = [P0 + 0x5C];	// Index Registers
+	R0 = [P0 + 0x5C];	/* Index Registers */
 	I0 = R0;
 	R0 = [P0 + 0x60];
 	I1 = R0;
@@ -64,7 +64,7 @@ ___longjmp:
 	R0 = [P0 + 0x68];
 	I3 = R0;
 
-	R0 = [P0 + 0x6C];	// Modifier Registers
+	R0 = [P0 + 0x6C];	/* Modifier Registers */
 	M0 = R0;
 	R0 = [P0 + 0x70];
 	M1 = R0;
@@ -73,7 +73,7 @@ ___longjmp:
 	R0 = [P0 + 0x78];
 	M3 = R0;
 
-	R0 = [P0 + 0x7C];	// Length Registers
+	R0 = [P0 + 0x7C];	/* Length Registers */
 	L0 = R0;
 	R0 = [P0 + 0x80];
 	L1 = R0;
@@ -82,7 +82,7 @@ ___longjmp:
 	R0 = [P0 + 0x88];
 	L3 = R0;
 
-	R0 = [P0 + 0x8C];	// Base Registers
+	R0 = [P0 + 0x8C];	/* Base Registers */
 	B0 = R0;
 	R0 = [P0 + 0x90];
 	B1 = R0;
@@ -91,7 +91,7 @@ ___longjmp:
 	R0 = [P0 + 0x98];
 	B3 = R0;
 
-	R0 = [P0 + 0x9C];	// Return Address (PC)
+	R0 = [P0 + 0x9C];	/* Return Address (PC) */
 	RETS = R0;
 
 	R0 = [SP++];

+ 11 - 11
libc/sysdeps/linux/bfin/bsd-_setjmp.S

@@ -12,20 +12,20 @@
 .align 4;
 
 __setjmp:
-	[--SP] = P0;	// Save P0
+	[--SP] = P0;	/* Save P0 */
 	P0 = R0;
 	R0 = [SP++];
-	[P0 + 0x00] = R0;	// Save saved P0
+	[P0 + 0x00] = R0;	/* Save saved P0 */
 	[P0 + 0x04] = P1;
 	[P0 + 0x08] = P2;
 	[P0 + 0x0C] = P3;
 	[P0 + 0x10] = P4;
 	[P0 + 0x14] = P5;
 
-	[P0 + 0x18] = FP;	// Frame Pointer
-	[P0 + 0x1C] = SP;	// Stack Pointer
+	[P0 + 0x18] = FP;	/* Frame Pointer */
+	[P0 + 0x1C] = SP;	/* Stack Pointer */
 
-	[P0 + 0x20] = P0;	// Data Registers
+	[P0 + 0x20] = P0;	/* Data Registers */
 	[P0 + 0x24] = R1;
 	[P0 + 0x28] = R2;
 	[P0 + 0x2C] = R3;
@@ -37,12 +37,12 @@ __setjmp:
 	R0 = ASTAT;
 	[P0 + 0x40] = R0;
 
-	R0 = LC0;		// Loop Counters
+	R0 = LC0;		/* Loop Counters */
 	[P0 + 0x44] = R0;
 	R0 = LC1;
 	[P0 + 0x48] = R0;
 
-	R0 = A0.W;		// Accumulators
+	R0 = A0.W;		/* Accumulators */
 	[P0 + 0x4C] = R0;
 	R0 = A0.X;
 	[P0 + 0x50] = R0;
@@ -51,7 +51,7 @@ __setjmp:
 	R0 = A1.X;
 	[P0 + 0x58] = R0;
 
-	R0 = I0;		// Index Registers
+	R0 = I0;		/* Index Registers */
 	[P0 + 0x5C] = R0;
 	R0 = I1;
 	[P0 + 0x60] = R0;
@@ -60,7 +60,7 @@ __setjmp:
 	R0 = I3;
 	[P0 + 0x68] = R0;
 
-	R0 = M0;		// Modifier Registers
+	R0 = M0;		/* Modifier Registers */
 	[P0 + 0x6C] = R0;
 	R0 = M1;
 	[P0 + 0x70] = R0;
@@ -69,7 +69,7 @@ __setjmp:
 	R0 = M3;
 	[P0 + 0x78] = R0;
 
-	R0 = L0;		// Length Registers
+	R0 = L0;		/* Length Registers */
 	[P0 + 0x7c] = R0;
 	R0 = L1;
 	[P0 + 0x80] = R0;
@@ -78,7 +78,7 @@ __setjmp:
 	R0 = L3;
 	[P0 + 0x88] = R0;
 
-	R0 = B0;		// Base Registers
+	R0 = B0;		/* Base Registers */
 	[P0 + 0x8C] = R0;
 	R0 = B1;
 	[P0 + 0x90] = R0;

+ 1 - 1
libc/sysdeps/linux/common/bits/uClibc_errno.h

@@ -9,7 +9,7 @@
 #ifdef IS_IN_rtld
 # undef errno
 # define errno _dl_errno
-extern int _dl_errno; // attribute_hidden;
+extern int _dl_errno; /* attribute_hidden; */
 #elif defined __UCLIBC_HAS_THREADS__
 # include <tls.h>
 # if defined USE___THREAD && USE___THREAD

+ 1 - 1
libc/sysdeps/linux/common/llseek.c

@@ -40,4 +40,4 @@ loff_t __libc_lseek64(int fd, loff_t offset, int whence)
 libc_hidden_proto(lseek64)
 weak_alias(__libc_lseek64,lseek64)
 libc_hidden_weak(lseek64)
-//strong_alias(__libc_lseek64,_llseek)
+/*strong_alias(__libc_lseek64,_llseek) */

+ 1 - 1
libc/sysdeps/linux/e1/crt1.c

@@ -26,7 +26,7 @@
  * is linking when the main() function is in a static library (.a)
  * we can be sure that main() actually gets linked in */
 extern void main(int argc,void *argv,void *envp);
-//void (*mainp)(int argc,void *argv,void *envp) = main;
+/* void (*mainp)(int argc,void *argv,void *envp) = main; */
 
 void __uClibc_main(int argc,void *argv,void *envp);
 

+ 37 - 37
libc/sysdeps/linux/ia64/__longjmp.S

@@ -44,18 +44,18 @@
 LEAF(__longjmp)
 	alloc r8=ar.pfs,2,1,0,0
 	mov r27=ar.rsc
-	add r2=0x98,in0		// r2 <- &jmpbuf.orig_jmp_buf_addr
+	add r2=0x98,in0		/* r2 <- &jmpbuf.orig_jmp_buf_addr */
 	;;
-	ld8 r8=[r2],-16		// r8 <- orig_jmp_buf_addr
+	ld8 r8=[r2],-16		/* r8 <- orig_jmp_buf_addr */
 	mov r10=ar.bsp
-	and r11=~0x3,r27	// clear ar.rsc.mode
+	and r11=~0x3,r27	/* clear ar.rsc.mode */
 	;;
-	flushrs			// flush dirty regs to backing store (must be first in insn grp)
-	ld8 r23=[r2],8		// r23 <- jmpbuf.ar_bsp
-	sub r8=r8,in0		// r8 <- &orig_jmpbuf - &jmpbuf
+	flushrs			/* flush dirty regs to backing store (must be first in insn grp) */
+	ld8 r23=[r2],8		/* r23 <- jmpbuf.ar_bsp */
+	sub r8=r8,in0		/* r8 <- &orig_jmpbuf - &jmpbuf */
 	;;
-	ld8 r25=[r2]		// r25 <- jmpbuf.ar_unat
-	extr.u r8=r8,3,6	// r8 <- (&orig_jmpbuf - &jmpbuf)/8 & 0x3f
+	ld8 r25=[r2]		/* r25 <- jmpbuf.ar_unat */
+	extr.u r8=r8,3,6	/* r8 <- (&orig_jmpbuf - &jmpbuf)/8 & 0x3f */
 	;;
 	cmp.lt pNeg,pPos=r8,r0
 	mov r2=in0
@@ -65,49 +65,49 @@ LEAF(__longjmp)
 (pPos)	sub r17=64,r8
 (pNeg)	sub r17=r0,r8
 	;;
-	mov ar.rsc=r11		// put RSE in enforced lazy mode
+	mov ar.rsc=r11		/* put RSE in enforced lazy mode */
 	shr.u r8=r25,r16
-	add r3=8,in0		// r3 <- &jmpbuf.r1
+	add r3=8,in0		/* r3 <- &jmpbuf.r1 */
 	shl r9=r25,r17
 	;;
 	or r25=r8,r9
 	;;
 	mov r26=ar.rnat
-	mov ar.unat=r25		// setup ar.unat (NaT bits for r1, r4-r7, and r12)
+	mov ar.unat=r25		/* setup ar.unat (NaT bits for r1, r4-r7, and r12) */
 	;;
-	ld8.fill.nta sp=[r2],16	// r12 (sp)
-	ld8.fill.nta gp=[r3],16		// r1 (gp)
-	dep r11=-1,r23,3,6	// r11 <- ia64_rse_rnat_addr(jmpbuf.ar_bsp)
+	ld8.fill.nta sp=[r2],16	/* r12 (sp) */
+	ld8.fill.nta gp=[r3],16		/* r1 (gp) */
+	dep r11=-1,r23,3,6	/* r11 <- ia64_rse_rnat_addr(jmpbuf.ar_bsp) */
 	;;
-	ld8.nta r16=[r2],16		// caller's unat
-	ld8.nta r17=[r3],16		// fpsr
+	ld8.nta r16=[r2],16		/* caller's unat */
+	ld8.nta r17=[r3],16		/* fpsr */
 	;;
-	ld8.fill.nta r4=[r2],16	// r4
-	ld8.fill.nta r5=[r3],16		// r5 (gp)
-	cmp.geu p8,p0=r10,r11	// p8 <- (ar.bsp >= jmpbuf.ar_bsp)
+	ld8.fill.nta r4=[r2],16	/* r4 */
+	ld8.fill.nta r5=[r3],16		/* r5 (gp) */
+	cmp.geu p8,p0=r10,r11	/* p8 <- (ar.bsp >= jmpbuf.ar_bsp) */
 	;;
-	ld8.fill.nta r6=[r2],16	// r6
-	ld8.fill.nta r7=[r3],16		// r7
+	ld8.fill.nta r6=[r2],16	/* r6 */
+	ld8.fill.nta r7=[r3],16		/* r7 */
 	;;
-	mov ar.unat=r16			// restore caller's unat
-	mov ar.fpsr=r17			// restore fpsr
+	mov ar.unat=r16			/* restore caller's unat */
+	mov ar.fpsr=r17			/* restore fpsr */
 	;;
-	ld8.nta r16=[r2],16		// b0
-	ld8.nta r17=[r3],16		// b1
+	ld8.nta r16=[r2],16		/* b0 */
+	ld8.nta r17=[r3],16		/* b1 */
 	;;
-(p8)	ld8 r26=[r11]		// r26 <- *ia64_rse_rnat_addr(jmpbuf.ar_bsp)
-	mov ar.bspstore=r23	// restore ar.bspstore
+(p8)	ld8 r26=[r11]		/* r26 <- *ia64_rse_rnat_addr(jmpbuf.ar_bsp) */
+	mov ar.bspstore=r23	/* restore ar.bspstore */
 	;;
-	ld8.nta r18=[r2],16		// b2
-	ld8.nta r19=[r3],16		// b3
+	ld8.nta r18=[r2],16		/* b2 */
+	ld8.nta r19=[r3],16		/* b3 */
 	;;
-	ld8.nta r20=[r2],16		// b4
-	ld8.nta r21=[r3],16		// b5
+	ld8.nta r20=[r2],16		/* b4 */
+	ld8.nta r21=[r3],16		/* b5 */
 	;;
-	ld8.nta r11=[r2],16		// ar.pfs
-	ld8.nta r22=[r3],56		// ar.lc
+	ld8.nta r11=[r2],16		/* ar.pfs */
+	ld8.nta r22=[r3],56		/* ar.lc */
 	;;
-	ld8.nta r24=[r2],32		// pr
+	ld8.nta r24=[r2],32		/* pr */
 	mov b0=r16
 	;;
 	ldf.fill.nta f2=[r2],32
@@ -149,12 +149,12 @@ LEAF(__longjmp)
 	ldf.fill.nta f31=[r3]
 (p8)	mov r8=1
 
-	mov ar.rnat=r26		// restore ar.rnat
+	mov ar.rnat=r26		/* restore ar.rnat */
 	;;
-	mov ar.rsc=r27		// restore ar.rsc
+	mov ar.rsc=r27		/* restore ar.rsc */
 (p9)	mov r8=in1
 
-	invala			// virt. -> phys. regnum mapping may change
+	invala			/* virt. -> phys. regnum mapping may change */
 	mov pr=r24,-1
 	ret
 END(__longjmp)

+ 22 - 22
libc/sysdeps/linux/ia64/setjmp.S

@@ -95,15 +95,15 @@ ENTRY(__sigsetjmp)
 	mov r2=in0
 	add r3=8,in0
 	;;
-.mem.offset 8,0;	st8.spill.nta [r2]=sp,16	// r12 (sp)
-.mem.offset 0,0;	st8.spill.nta [r3]=gp,16	// r1 (gp)
+.mem.offset 8,0;	st8.spill.nta [r2]=sp,16	/* r12 (sp) */
+.mem.offset 0,0;	st8.spill.nta [r3]=gp,16	/* r1 (gp) */
 	;;
-	st8.nta [r2]=loc2,16		// save caller's unat
-	st8.nta [r3]=r17,16		// save fpsr
+	st8.nta [r2]=loc2,16		/* save caller's unat */
+	st8.nta [r3]=r17,16		/* save fpsr */
 	add r8=0xa0,in0
 	;;
-.mem.offset 8,0;	st8.spill.nta [r2]=r4,16	// r4
-.mem.offset 0,0;	st8.spill.nta [r3]=r5,16	// r5
+.mem.offset 8,0;	st8.spill.nta [r2]=r4,16	/* r4 */
+.mem.offset 0,0;	st8.spill.nta [r3]=r5,16	/* r5 */
 	add r9=0xb0,in0
 	;;
 	stf.spill.nta [r8]=f2,32
@@ -145,39 +145,39 @@ ENTRY(__sigsetjmp)
 	stf.spill.nta [r8]=f30
 	stf.spill.nta [r9]=f31
 
-.mem.offset 8,0;	st8.spill.nta [r2]=r6,16	// r6
-.mem.offset 0,0;	st8.spill.nta [r3]=r7,16	// r7
+.mem.offset 8,0;	st8.spill.nta [r2]=r6,16	/* r6 */
+.mem.offset 0,0;	st8.spill.nta [r3]=r7,16	/* r7 */
 	;;
 	mov r23=ar.bsp
 	mov r25=ar.unat
 	mov out0=in0
 
-	st8.nta [r2]=loc0,16		// b0
-	st8.nta [r3]=r17,16		// b1
+	st8.nta [r2]=loc0,16		/* b0 */
+	st8.nta [r3]=r17,16		/* b1 */
 	mov out1=in1
 	;;
-	st8.nta [r2]=r18,16		// b2
-	st8.nta [r3]=r19,16		// b3
+	st8.nta [r2]=r18,16		/* b2 */
+	st8.nta [r3]=r19,16		/* b3 */
 	;;
-	st8.nta [r2]=r20,16		// b4
-	st8.nta [r3]=r21,16		// b5
+	st8.nta [r2]=r20,16		/* b4 */
+	st8.nta [r3]=r21,16		/* b5 */
 	;;
-	st8.nta [r2]=loc1,16		// ar.pfs
-	st8.nta [r3]=r22,16		// ar.lc
+	st8.nta [r2]=loc1,16		/* ar.pfs */
+	st8.nta [r3]=r22,16		/* ar.lc */
 	;;
-	st8.nta [r2]=r24,16		// pr
-	st8.nta [r3]=r23,16		// ar.bsp
+	st8.nta [r2]=r24,16		/* pr */
+	st8.nta [r3]=r23,16		/* ar.bsp */
 	;;
-	st8.nta [r2]=r25		// ar.unat
-	st8.nta [r3]=in0		// &__jmp_buf
+	st8.nta [r2]=r25		/* ar.unat */
+	st8.nta [r3]=in0		/* &__jmp_buf */
 #if defined NOT_IN_libc && defined IS_IN_rtld
 	/* In ld.so we never save the signal mask.  */
 	;;
 #else
 	br.call.dpnt.few rp=__sigjmp_save
 #endif
-.ret0:					// force a new bundle ::q
-	mov.m ar.unat=loc2		// restore caller's unat
+.ret0:					/* force a new bundle ::q */
+	mov.m ar.unat=loc2		/* restore caller's unat */
 	mov rp=loc0
 	mov ar.pfs=loc1
 	mov r8=0

+ 8 - 8
libc/sysdeps/linux/microblaze/__longjmp.S

@@ -8,7 +8,7 @@
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License.  See the file COPYING.LIB in the main
  * directory of this archive for more details.
- * 
+ *
  * Written by Miles Bader <miles@gnu.org>
  */
 
@@ -20,25 +20,25 @@
 
 	.text
 C_ENTRY(__longjmp):
-	/* load registers from memory to r5 (arg0)*/
+	/* load registers from memory to r5 (arg0) */
 	lwi	r1, r5, 0
 	lwi	r15, r5, 4
 	lwi	r18, r5, 8
 	lwi	r19, r5, 12
 	lwi	r20, r5, 16
-	lwi	r21, r5, 20 
-	lwi	r22, r5, 24 
+	lwi	r21, r5, 20
+	lwi	r22, r5, 24
 	lwi	r23, r5, 28
-	lwi	r24, r5, 32 
+	lwi	r24, r5, 32
 	lwi	r25, r5, 36
 	lwi	r26, r5, 40
 	lwi	r27, r5, 44
 	lwi	r28, r5, 48
 	lwi	r29, r5, 52
 	lwi	r30, r5, 56
-	
-	addi	r3, r0, 1		// return val
-	rtsd	r15, 8			// normal return
+
+	addi	r3, r0, 1		/* return val */
+	rtsd	r15, 8			/* normal return */
 	nop
 
 C_END(__longjmp)

+ 9 - 9
libc/sysdeps/linux/microblaze/crt0.S

@@ -20,22 +20,22 @@
 
 	.text
 C_ENTRY(_start):
-	lw	r5, r0, r1		// Arg 0: argc
+	lw	r5, r0, r1	/* Arg 0: argc */
 
-	addi	r6, r1, 4		// Arg 1: argv
+	addi	r6, r1, 4	/* Arg 1: argv */
 
-					// Arg 2: envp
-	addi	r3, r5, 1		// skip argc elements to get envp start
-					// ...plus the NULL at the end of argv
-	add	r3, r3, r3		// Make word offset
+				/* Arg 2: envp */
+	addi	r3, r5, 1	/* skip argc elements to get envp start */
+				/* ...plus the NULL at the end of argv */
+	add	r3, r3, r3	/* Make word offset */
 	add	r3, r3, r3
-	add	r7, r6, r3		// add to argv to get offset
+	add	r7, r6, r3	/* add to argv to get offset */
 
-	// Load SDAs
+	/* Load SDAs */
 	la	r2, r0, C_SYMBOL_NAME(_SDA_BASE_)
 	la	r13, r0, C_SYMBOL_NAME(_SDA2_BASE_)
 
-	// tail-call uclibc's startup routine
+	/* tail-call uclibc's startup routine */
 	brid	C_SYMBOL_NAME(__uClibc_main)
 	nop
 

+ 6 - 6
libc/sysdeps/linux/microblaze/vfork.S

@@ -8,7 +8,7 @@
  *
  * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
  */
-/* 
+/*
  * Written by Miles Bader <miles@gnu.org>
  * Microblaze port by John Williams
 */
@@ -31,13 +31,13 @@ C_ENTRY (__vfork):
 	addi	r12, r0, SYS_vfork
 	bralid	r17, 0x08;
 	nop
-	addi	r4, r3, 125		// minimum err value
-	blti	r4, 1f			// is r3 < -125?
-	rtsd	r15, 8			// normal return
+	addi	r4, r3, 125		/* minimum err value */
+	blti	r4, 1f			/* is r3 < -125? */
+	rtsd	r15, 8			/* normal return */
 	nop
-1:	sub 	r3, r3, r0		// r3 = -r3
+1:	sub	r3, r3, r0		/* r3 = -r3 */
 	swi	r3, r0, C_SYMBOL_NAME(errno);
-	rtsd	r15, 8			// error return
+	rtsd	r15, 8			/* error return */
 	nop
 C_END(__vfork)
 weak_alias(__vfork,vfork)

+ 1 - 1
libc/sysdeps/linux/nios/bits/endian.h

@@ -4,5 +4,5 @@
 # error "Never use <bits/endian.h> directly; include <endian.h> instead."
 #endif
 
-//mle
+/*mle */
 #define __BYTE_ORDER __LITTLE_ENDIAN

+ 3 - 3
libc/sysdeps/linux/nios/crt1.S

@@ -35,13 +35,13 @@ Cambridge, MA 02139, USA.  */
 
 	.text
 
-_start: 
+_start:
 	nop
 	nop
 
 	MOVIA	%o0, main@h
-	lds	%o1,[%sp, (REGWIN_SZ / 4) + 0]	// main's argc
-	lds	%o2,[%sp, (REGWIN_SZ / 4) + 1]	// main's argv
+	lds	%o1,[%sp, (REGWIN_SZ / 4) + 0]	/* main's argc */
+	lds	%o2,[%sp, (REGWIN_SZ / 4) + 1]	/* main's argv */
 
 	MOVIA	%o3, _init@h
 	MOVIA	%o4, _fini@h

+ 3 - 3
libc/sysdeps/linux/sh/clone.S

@@ -63,7 +63,7 @@ clone:
 	trapa	#(__SH_SYSCALL_TRAP_BASE + 2)
 	mov     r0, r1
 #ifdef __CONFIG_SH2__
-// 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	
+/* 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	 */
 	shar	r1
 	shar	r1
 	shar	r1
@@ -80,8 +80,8 @@ clone:
 	mov	#-12, r2
 	shad	r2, r1
 #endif
-	not	r1, r1			// r1=0 means r0 = -1 to -4095
-	tst	r1, r1			// i.e. error in linux
+	not	r1, r1			/* r1=0 means r0 = -1 to -4095 */
+	tst	r1, r1			/* i.e. error in linux */
 	bf/s	2f
 	 tst	r0, r0
         bra __syscall_error

+ 8 - 8
libc/sysdeps/linux/sh/vfork.S

@@ -42,7 +42,7 @@ __vfork:
 	trapa	#__SH_SYSCALL_TRAP_BASE
 	mov     r0, r1
 #ifdef __CONFIG_SH2__
-// 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	
+/* 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	 */
 	shar	r1
 	shar	r1
 	shar	r1
@@ -55,13 +55,13 @@ __vfork:
 	shar	r1
 	shar	r1
 	shar	r1
-#else		
+#else
 	mov	#-12, r2
 	shad	r2, r1
 #endif
 
-	not	r1, r1			// r1=0 means r0 = -1 to -4095
-	tst	r1, r1			// i.e. error in linux
+	not	r1, r1			/* r1=0 means r0 = -1 to -4095 */
+	tst	r1, r1			/* i.e. error in linux */
 	bf	2f
 	mov.w	.L1, r1
 	cmp/eq	r1, r0
@@ -73,7 +73,7 @@ __vfork:
 	trapa	#__SH_SYSCALL_TRAP_BASE
 	mov     r0, r1
 #ifdef __CONFIG_SH2__
-// 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	
+/* 12 arithmetic shifts for the crappy sh2, because shad doesn't exist!	 */
 	shar	r1
 	shar	r1
 	shar	r1
@@ -86,13 +86,13 @@ __vfork:
 	shar	r1
 	shar	r1
 	shar	r1
-#else		
+#else
 	mov	#-12, r2
 	shad	r2, r1
 #endif
 
-	not	r1, r1			// r1=0 means r0 = -1 to -4095
-	tst	r1, r1			// i.e. error in linux
+	not	r1, r1			/* r1=0 means r0 = -1 to -4095 */
+	tst	r1, r1			/* i.e. error in linux */
 	bt/s	__syscall_error
 	 mov	r0, r4
 2:

+ 1 - 1
libc/sysdeps/linux/sparc/qp_ops.c

@@ -1,4 +1,4 @@
-// XXX add ops from glibc sysdeps/sparc/sparc64/soft-fp
+/* XXX add ops from glibc sysdeps/sparc/sparc64/soft-fp */
 
 #define fakedef(name)                                                   \
     void name(void)                                                     \

+ 2 - 2
libc/sysdeps/linux/v850/__longjmp.S

@@ -7,7 +7,7 @@
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License.  See the file COPYING.LIB in the main
  * directory of this archive for more details.
- * 
+ *
  * Written by Miles Bader <miles@gnu.org>
 */
 
@@ -35,7 +35,7 @@ C_ENTRY(__longjmp):
 	sld.w	40[ep], r27
 	sld.w	44[ep], r28
 	sld.w	48[ep], r29
-	mov	1, r10			// return val
+	mov	1, r10			/* return val */
 	jmp	[lp]
 C_END(__longjmp)
 libc_hidden_def(__longjmp)

+ 10 - 10
libc/sysdeps/linux/v850/crt0.S

@@ -19,25 +19,25 @@
 
 	.text
 C_ENTRY(start):
-	ld.w	0[sp], r6		// Arg 0: argc
+	ld.w	0[sp], r6	/* Arg 0: argc */
 
-	addi	4, sp, r7		// Arg 1: argv
+	addi	4, sp, r7	/* Arg 1: argv */
 
-	mov	r7, r8			// Arg 2: envp
-	mov	r6, r10			// skip argc elements to get envp start
-	add	1, r10			// ...plus the NULL at the end of argv
-	shl	2, r10			// Convert to byte-count to skip
+	mov	r7, r8		/* Arg 2: envp */
+	mov	r6, r10		/* skip argc elements to get envp start */
+	add	1, r10		/* ...plus the NULL at the end of argv */
+	shl	2, r10		/* Convert to byte-count to skip */
 	add	r10, r8
 
-	// Load CTBP register
+	/* Load CTBP register */
 	mov	hilo(C_SYMBOL_NAME(_ctbp)), r19
 	ldsr	r19, ctbp
 
-	// Load GP
+	/* Load GP */
 	mov	hilo(C_SYMBOL_NAME(_gp)), gp
 
-	// tail-call uclibc's startup routine
-	addi	-24, sp, sp		// Stack space reserved for args
+	/* tail-call uclibc's startup routine */
+	addi	-24, sp, sp		/* Stack space reserved for args */
 	jr	C_SYMBOL_NAME(__uClibc_main)
 
 

+ 4 - 4
libc/sysdeps/linux/v850/vfork.S

@@ -7,7 +7,7 @@
  * This file is subject to the terms and conditions of the GNU Lesser
  * General Public License.  See the file COPYING.LIB in the main
  * directory of this archive for more details.
- * 
+ *
  * Written by Miles Bader <miles@gnu.org>
 */
 
@@ -29,14 +29,14 @@
 C_ENTRY (__vfork):
 	addi	SYS_vfork, r0, r12
 	trap	0
-	addi	-125, r0, r11		// minimum err value
+	addi	-125, r0, r11		/* minimum err value */
 	cmp	r11, r10
 	bh	1f
-	jmp	[lp]			// normal return
+	jmp	[lp]			/* normal return */
 1:	mov	hilo(C_SYMBOL_NAME(errno)), r11
 	subr	r0, r10
 	st.w	r10, 0[r11]
-	jmp	[lp]			// error return
+	jmp	[lp]			/* error return */
 C_END(__vfork)
 weak_alias(__vfork,vfork)
 libc_hidden_weak(vfork)

+ 3 - 3
libc/sysdeps/linux/xtensa/__longjmp.S

@@ -84,7 +84,7 @@ ENTRY (__longjmp)
 	slli	a4, a7, 4
 	sub	a6, a8, a4
 	addi	a5, a2, 16
-	addi	a8, a8, -16		// a8 = end of register overflow area
+	addi	a8, a8, -16		/* a8 = end of register overflow area */
 .Lljloop:
 	l32i	a7, a5, 0
 	l32i	a4, a5, 4
@@ -105,8 +105,8 @@ ENTRY (__longjmp)
 	   case the contents were moved by an alloca after calling
 	   setjmp.  This is a bit paranoid but it doesn't cost much.  */
 
-	l32i	a7, a2, 4		// load the target stack pointer
-	addi	a7, a7, -16		// find the destination save area
+	l32i	a7, a2, 4		/* load the target stack pointer */
+	addi	a7, a7, -16		/* find the destination save area */
 	l32i	a4, a2, 48
 	l32i	a5, a2, 52
 	s32i	a4, a7, 0

+ 3 - 3
libc/sysdeps/linux/xtensa/setjmp.S

@@ -61,13 +61,13 @@ END (setjmp)
 		    a3 = int savemask)  */
 
 ENTRY (__sigsetjmp)
-1:	
+1:
 	/* Flush registers.  */
 	movi	a4, __window_spill
 	callx4	a4
 
 	/* Preserve the second argument (savemask) in a15.  The selection
- 	   of a15 is arbitrary, except it's otherwise unused.  There is no
+	   of a15 is arbitrary, except it's otherwise unused.  There is no
 	   risk of triggering a window overflow since we just returned
 	   from __window_spill().  */
 	mov	a15, a3
@@ -90,7 +90,7 @@ ENTRY (__sigsetjmp)
 	slli	a4, a3, 4
 	sub	a5, a7, a4
 	addi	a6, a2, 16
-	addi	a7, a7, -16		// a7 = end of register overflow area
+	addi	a7, a7, -16		/* a7 = end of register overflow area */
 .Lsjloop:
 	l32i	a3, a5, 0
 	l32i	a4, a5, 4

+ 15 - 15
libc/sysdeps/linux/xtensa/vfork.S

@@ -52,19 +52,19 @@
 ENTRY (__vfork)
 
 	movi	a6, .Ljumptable
-	extui	a2, a0, 30, 2		// call-size: call4/8/12 = 1/2/3
-	addx4	a4, a2, a6		// find return address in jumptable
+	extui	a2, a0, 30, 2		/* call-size: call4/8/12 = 1/2/3 */
+	addx4	a4, a2, a6		/* find return address in jumptable */
 	l32i	a4, a4, 0
 	add	a4, a4, a6
 
 	slli	a2, a2, 30
-	xor	a3, a0, a2		// remove call-size from return address
-	extui	a5, a4, 30, 2		// get high bits of jump target
+	xor	a3, a0, a2		/* remove call-size from return addr */
+	extui	a5, a4, 30, 2		/* get high bits of jump target */
 	slli	a5, a5, 30
-	or	a3, a3, a5		// stuff them into the return address
-	xor	a4, a4, a5		// clear high bits of jump target
-	or	a0, a4, a2		// create temporary return address
-	retw				// "return" to .L4, .L8, or .L12
+	or	a3, a3, a5		/* stuff them into the return address */
+	xor	a4, a4, a5		/* clear high bits of jump target */
+	or	a0, a4, a2		/* create temporary return address */
+	retw				/* "return" to .L4, .L8, or .L12 */
 
 	.align	4
 .Ljumptable:
@@ -81,7 +81,7 @@ ENTRY (__vfork)
 
 	/* Use syscall 'clone'.  Set new stack pointer to the same address.  */
 	movi	a2, SYS_ify (clone)
- 	movi	a3, 0
+	movi	a3, 0
 	movi	a6, CLONE_VM | CLONE_VFORK | SIGCHLD
         syscall
 
@@ -95,7 +95,7 @@ ENTRY (__vfork)
 
 	bgeu	a6, a5, 1f
 	jx	a7
-1:	call4	.Lerr			// returns to original caller
+1:	call4	.Lerr			/* returns to original caller */
 
 
 	/* a11: return address */
@@ -121,7 +121,7 @@ ENTRY (__vfork)
 
 	bgeu	a10, a9, 1f
 	jx	a11
-1:	call8	.Lerr			// returns to original caller
+1:	call8	.Lerr			/* returns to original caller */
 
 
 	/* a15: return address */
@@ -148,18 +148,18 @@ ENTRY (__vfork)
 
 	bgeu	a14, a13, 1f
 	jx	a15
-1:	call12	.Lerr			// returns to original caller
+1:	call12	.Lerr			/* returns to original caller */
 
 
 	.align 4
 .Lerr:	entry	a1, 16
 
 	/* Restore the return address.  */
-	extui	a4, a0, 30, 2		// get the call-size bits
+	extui	a4, a0, 30, 2		/* get the call-size bits */
 	slli	a4, a4, 30
-	slli	a3, a3, 2		// clear high bits of target address
+	slli	a3, a3, 2		/* clear high bits of target address */
 	srli	a3, a3, 2
-	or	a0, a3, a4		// combine them
+	or	a0, a3, a4		/* combine them */
 
 	PSEUDO_END (__vfork)
 .Lpseudo_end:

+ 13 - 13
libc/sysdeps/linux/xtensa/windowspill.S

@@ -26,8 +26,8 @@
 	.type   __window_spill, @function
 	.type   __window_spill, @function
 __window_spill:
 __window_spill:
 	entry	a1, 48
 	entry	a1, 48
-        bbci.l  a0, 31, .L4		// branch if called with call4
-        bbsi.l  a0, 30, .L12		// branch if called with call12
+        bbci.l  a0, 31, .L4		/* branch if called with call4 */
+        bbsi.l  a0, 30, .L12		/* branch if called with call12 */
 
 
 	/* Called with call8: touch register NUM_REGS-12 (4/20/52) */
 	/* Called with call8: touch register NUM_REGS-12 (4/20/52) */
 .L8:
 .L8:
@@ -36,18 +36,18 @@ __window_spill:
 	retw
 	retw
 
 
 	.align	4
 	.align	4
-1:	_entry	a1, 48			// touch NUM_REGS-24 (x/8/40)
+1:	_entry	a1, 48			/* touch NUM_REGS-24 (x/8/40) */
 
 
 #if XCHAL_NUM_AREGS == 32
 #if XCHAL_NUM_AREGS == 32
 	mov	a8, a0
 	mov	a8, a0
 	retw
 	retw
 #else
 #else
 	mov	a12, a0
 	mov	a12, a0
-	_entry	a1, 48			// touch NUM_REGS-36 (x/x/28)
+	_entry	a1, 48			/* touch NUM_REGS-36 (x/x/28) */
 	mov	a12, a0
 	mov	a12, a0
-	_entry	a1, 48			// touch NUM_REGS-48 (x/x/16)
+	_entry	a1, 48			/* touch NUM_REGS-48 (x/x/16) */
 	mov	a12, a0
 	mov	a12, a0
-	_entry	a1, 16			// touch NUM_REGS-60 (x/x/4)
+	_entry	a1, 16			/* touch NUM_REGS-60 (x/x/4) */
 #endif
 #endif
 #endif
 #endif
 	mov	a4, a0
 	mov	a4, a0
@@ -62,14 +62,14 @@ __window_spill:
 	retw
 	retw
 
 
 	.align	4
 	.align	4
-1:	_entry	a1, 48			// touch NUM_REGS-20 (x/12/44)
+1:	_entry	a1, 48			/* touch NUM_REGS-20 (x/12/44) */
 	mov	a12, a0
 #if XCHAL_NUM_AREGS > 32
-	_entry	a1, 48			// touch NUM_REGS-32 (x/x/32)
+	_entry	a1, 48			/* touch NUM_REGS-32 (x/x/32) */
 	mov	a12, a0
-	_entry	a1, 48			// touch NUM_REGS-44 (x/x/20)
+	_entry	a1, 48			/* touch NUM_REGS-44 (x/x/20) */
 	mov	a12, a0
-	_entry	a1, 48			// touch NUM_REGS-56 (x/x/8)
+	_entry	a1, 48			/* touch NUM_REGS-56 (x/x/8) */
 	mov	a8, a0
 #endif
 #endif
@@ -82,14 +82,14 @@ __window_spill:
 	retw
 
 	.align	4
-1:	_entry	a1, 48			// touch NUM_REGS-28 (x/4/36)
+1:	_entry	a1, 48			/* touch NUM_REGS-28 (x/4/36) */
 #if XCHAL_NUM_AREGS == 32
 	mov	a4, a0
 #else
 	mov	a12, a0
-	_entry	a1, 48			// touch NUM_REGS-40 (x/x/24)
+	_entry	a1, 48			/* touch NUM_REGS-40 (x/x/24) */
 	mov	a12, a0
-	_entry	a1, 48			// touch NUM_REGS-52 (x/x/12)
+	_entry	a1, 48			/* touch NUM_REGS-52 (x/x/12) */
 	mov	a12, a0
 #endif
 #endif

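Every hunk in this commit makes the same mechanical change, and the reason is worth stating once: // comments are a C99 (and C++) feature, so they are rejected when the sources are compiled in strict C89 mode. A two-line demonstration -- gcc -std=c89 -pedantic warns about the first comment below, and -pedantic-errors refuses it outright:

	int main(void)
	{
		// error in strict C89: "//" comments arrived with C99
		/* accepted by every C standard */
		return 0;
	}
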
+ 1 - 1
libm/e_gamma.c

@@ -23,7 +23,7 @@
 
 libm_hidden_proto(signgam)
 #ifdef __STDC__
-	//__private_extern__
+	/* __private_extern__ */
 	double attribute_hidden __ieee754_gamma(double x)
 #else
 	double attribute_hidden __ieee754_gamma(x)

+ 1 - 1
libm/e_gamma_r.c

@@ -22,7 +22,7 @@
 #include "math_private.h"
 
 #ifdef __STDC__
-	//__private_extern__
+	/* __private_extern__ */
 	double attribute_hidden __ieee754_gamma_r(double x, int *signgamp)
 #else
 	double attribute_hidden __ieee754_gamma_r(x,signgamp)

+ 1 - 1
libm/e_lgamma.c

@@ -23,7 +23,7 @@
 
 libm_hidden_proto(signgam)
 #ifdef __STDC__
-	//__private_extern__
+	/* __private_extern__ */
 	double attribute_hidden __ieee754_lgamma(double x)
 #else
 	double attribute_hidden __ieee754_lgamma(x)

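The #ifdef __STDC__ split visible in all three libm hunks is the usual fdlibm idiom for supporting both ANSI and pre-ANSI (K&R) compilers: one prototyped definition, one old-style definition with separate parameter declarations. A stripped-down sketch of the pattern (the function name is illustrative):

	#ifdef __STDC__
	double my_gamma(double x)	/* ANSI C: prototype form */
	#else
	double my_gamma(x)		/* K&R C: identifier list... */
		double x;		/* ...plus separate declarations */
	#endif
	{
		return x;		/* body shared by both forms */
	}
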
+ 6 - 5
libpthread/linuxthreads.old/forward.c

@@ -23,20 +23,21 @@
 
 /* psm: keep this before internals.h */
 libc_hidden_proto(exit)
-/* vda: here's why:
+#if 0
+vda: here's why:
 In libpthread/linuxthreads.old/sysdeps/pthread/bits/libc-lock.h
 adding libc_hidden_proto(foo) just before weak_extern (__pthread_initialize)
 will not warn:
-    //libc_hidden_proto(foo)
+    /* libc_hidden_proto(foo) */
     weak_extern (__pthread_initialize)
-    //libc_hidden_proto(foo)
+    /* libc_hidden_proto(foo) */
 but adding after will! Which is extremely strange -
 weak_extern expands into just "#pragma weak __pthread_initialize".
 TODO: determine whether it is a gcc bug or what
-(see gcc.gnu.org/bugzilla/show_bug.cgi?id=36282).
+(see gcc.gnu.org/PR36282).
 For now, just include all headers before internals.h
 (they are again included in internals.h - maybe remove them there later)
-*/
+#endif
 #include <string.h>
 #include <limits.h>
 #include <setjmp.h>

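The forward.c hunk is the one place where a straight // to /* */ rewrite would not work: the original block comment quotes libc_hidden_proto(foo) lines, and once those are written as /* ... */ inside the enclosing comment the file no longer compiles, because C comments do not nest. Wrapping the whole explanation in #if 0 ... #endif avoids the problem. A minimal illustration:

	/* C comments do not nest, so prose containing comment examples
	   goes into a preprocessor-skipped block instead: */
	#if 0
	Anything may sit here, even text with /* its own comments */,
	because the preprocessor discards the group before compilation.
	#endif
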
+ 3 - 3
libpthread/linuxthreads.old/pthread.c

@@ -477,12 +477,12 @@ static void pthread_initialize(void)
   __libc_sigaction(__pthread_sig_restart, &sa, NULL);
   sa.sa_handler = pthread_handle_sigcancel;
   sigaddset(&sa.sa_mask, __pthread_sig_restart);
-  // sa.sa_flags = 0;
+  /* sa.sa_flags = 0; */
   __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
   if (__pthread_sig_debug > 0) {
       sa.sa_handler = pthread_handle_sigdebug;
       sigemptyset(&sa.sa_mask);
-      // sa.sa_flags = 0;
+      /* sa.sa_flags = 0; */
       __libc_sigaction(__pthread_sig_debug, &sa, NULL);
   }
   /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
@@ -530,7 +530,7 @@ int __pthread_initialize_manager(void)
 	 __pthread_manager_thread_bos, __pthread_manager_thread_tos);
 #if 0
   PDEBUG("initial stack: estimate bos=%p, tos=%p\n",
-  	 __pthread_initial_thread_bos, __pthread_initial_thread_tos);
+	 __pthread_initial_thread_bos, __pthread_initial_thread_tos);
 #endif
 
   /* Setup pipe to communicate with thread manager */

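The two pthread_initialize() hunks (here and in linuxthreads below) only restyle the commented-out sa.sa_flags = 0; lines -- sa_flags is evidently set earlier in the function, so the assignments are redundant -- but the surrounding code is a standard handler-installation sequence. A hedged sketch of its shape, where the handler names are placeholders rather than the real linuxthreads symbols:

	#include <signal.h>

	static void restart_handler(int sig) { (void)sig; }	/* placeholder */
	static void cancel_handler(int sig)  { (void)sig; }	/* placeholder */

	static void install_handlers(int sig_restart, int sig_cancel)
	{
		struct sigaction sa;

		sa.sa_handler = restart_handler;
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = 0;
		sigaction(sig_restart, &sa, NULL);

		/* keep the restart signal blocked while cancelling */
		sa.sa_handler = cancel_handler;
		sigaddset(&sa.sa_mask, sig_restart);
		sigaction(sig_cancel, &sa, NULL);
	}
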
+ 2 - 2
libpthread/linuxthreads/pthread.c

@@ -567,12 +567,12 @@ static void pthread_initialize(void)
   __libc_sigaction(__pthread_sig_restart, &sa, NULL);
   sa.sa_handler = pthread_handle_sigcancel;
   sigaddset(&sa.sa_mask, __pthread_sig_restart);
-  // sa.sa_flags = 0;
+  /* sa.sa_flags = 0; */
   __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
   if (__pthread_sig_debug > 0) {
     sa.sa_handler = pthread_handle_sigdebug;
     sigemptyset(&sa.sa_mask);
-    // sa.sa_flags = 0;
+    /* sa.sa_flags = 0; */
     __libc_sigaction(__pthread_sig_debug, &sa, NULL);
   }
   /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */

+ 3 - 3
libpthread/linuxthreads/sysdeps/unix/sysv/linux/sh/vfork.S

@@ -44,8 +44,8 @@ ENTRY (__vfork)
 	mov     r0, r1
 	mov	#-12, r2
 	shad	r2, r1
-	not	r1, r1			// r1=0 means r0 = -1 to -4095
-	tst	r1, r1			// i.e. error in linux
+	not	r1, r1			/* r1=0 means r0 = -1 to -4095 */
+	tst	r1, r1			/* i.e. error in linux */
 	bf	.Lpseudo_end
 	SYSCALL_ERROR_HANDLER
 .Lpseudo_end:
@@ -64,7 +64,7 @@ ENTRY (__vfork)
 	.long	pthread_create
 #endif
 
-.Lhidden_fork:	
+.Lhidden_fork:
 	mov.l	.L2, r1
 	braf	r1
 	 nop

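The rewritten comments in the SH vfork hunk describe the Linux syscall return convention: results in the range -1 down to -4095 are negated errno values, anything else is success. The shad/not/tst sequence tests exactly that range; in C the same check is conventionally written as below (a sketch, not uClibc's actual error-check macro):

	/* Linux syscall return convention: [-4095, -1] encodes -errno. */
	static int syscall_failed(long ret)
	{
		return (unsigned long)ret >= (unsigned long)-4095L;
	}
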
+ 4 - 4
utils/ldd.c

@@ -525,7 +525,7 @@ static int add_library(ElfW(Ehdr) *ehdr, ElfW(Dyn) *dynamic, int is_setuid, char
 			tmp1++;
 		}
 		if (strcmp(tmp2, s) == 0) {
-			//printf("find_elf_interpreter is skipping '%s' (already in list)\n", cur->name);
+			/*printf("find_elf_interpreter is skipping '%s' (already in list)\n", cur->name); */
 			return 0;
 		}
 	}
@@ -543,7 +543,7 @@ static int add_library(ElfW(Ehdr) *ehdr, ElfW(Dyn) *dynamic, int is_setuid, char
 	/* Now try and locate where this library might be living... */
 	locate_library_file(ehdr, dynamic, is_setuid, newlib);
 
-	//printf("add_library is adding '%s' to '%s'\n", newlib->name, newlib->path);
+	/*printf("add_library is adding '%s' to '%s'\n", newlib->name, newlib->path); */
 	if (!lib_list) {
 		lib_list = newlib;
 	} else {
@@ -596,7 +596,7 @@ static struct library *find_elf_interpreter(ElfW(Ehdr) *ehdr)
 		for (cur = lib_list; cur; cur = cur->next) {
 			/* Check if this library is already in the list */
 			if (strcmp(cur->name, tmp1) == 0) {
-				//printf("find_elf_interpreter is replacing '%s' (already in list)\n", cur->name);
+				/*printf("find_elf_interpreter is replacing '%s' (already in list)\n", cur->name); */
 				newlib = cur;
 				free(newlib->name);
 				if (newlib->path != not_found) {
@@ -618,7 +618,7 @@ static struct library *find_elf_interpreter(ElfW(Ehdr) *ehdr)
 		newlib->next = NULL;
 
 #if 0
-		//printf("find_elf_interpreter is adding '%s' to '%s'\n", newlib->name, newlib->path);
+		/*printf("find_elf_interpreter is adding '%s' to '%s'\n", newlib->name, newlib->path); */
 		if (!lib_list) {
 			lib_list = newlib;
 		} else {
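
For context on the ldd.c hunks: add_library() keeps the discovered libraries in a singly linked list, and the code around the restyled printf comments either installs the first entry as the head or walks to the tail and appends. A generic sketch of that append, with illustrative types rather than ldd's exact ones:

	#include <stddef.h>

	struct library {
		char *name;
		struct library *next;
	};

	static struct library *lib_list = NULL;	/* list head */

	static void append_library(struct library *newlib)
	{
		newlib->next = NULL;
		if (!lib_list) {
			lib_list = newlib;	/* first entry becomes head */
		} else {
			struct library *cur = lib_list;
			while (cur->next)	/* walk to the tail... */
				cur = cur->next;
			cur->next = newlib;	/* ...and link the entry */
		}
	}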