Browse Source

- fixup asm. No object-code changes

Bernhard Reutner-Fischer 16 years ago
parent
commit
1d8abd74c4
72 changed files with 275 additions and 275 deletions
  1. 1 1
      include/atomic.h
  2. 5 5
      ldso/ldso/arm/dl-sysdep.h
  3. 1 1
      ldso/ldso/bfin/dl-inlines.h
  4. 1 1
      ldso/ldso/bfin/dl-syscalls.h
  5. 3 3
      ldso/ldso/bfin/dl-sysdep.h
  6. 1 1
      ldso/ldso/frv/dl-inlines.h
  7. 1 1
      ldso/ldso/frv/dl-syscalls.h
  8. 1 1
      ldso/ldso/frv/dl-sysdep.h
  9. 2 2
      ldso/ldso/frv/elfinterp.c
  10. 2 2
      ldso/ldso/m68k/dl-sysdep.h
  11. 1 1
      ldso/ldso/mips/dl-sysdep.h
  12. 9 9
      ldso/ldso/powerpc/dl-sysdep.h
  13. 5 5
      ldso/ldso/sparc/dl-sysdep.h
  14. 1 1
      libc/string/cris/memcpy.c
  15. 1 1
      libc/string/cris/memset.c
  16. 1 1
      libc/string/powerpc/memset.c
  17. 3 3
      libc/sysdeps/linux/alpha/bits/atomic.h
  18. 5 5
      libc/sysdeps/linux/alpha/bits/mathinline.h
  19. 1 1
      libc/sysdeps/linux/cris/__init_brk.c
  20. 3 3
      libc/sysdeps/linux/cris/bits/syscalls.h
  21. 1 1
      libc/sysdeps/linux/cris/brk.c
  22. 1 1
      libc/sysdeps/linux/cris/sbrk.c
  23. 18 18
      libc/sysdeps/linux/e1/bits/fenvinline.h
  24. 2 2
      libc/sysdeps/linux/e1/longjmp.c
  25. 4 4
      libc/sysdeps/linux/e1/setjmp.c
  26. 1 1
      libc/sysdeps/linux/h8300/brk.c
  27. 1 1
      libc/sysdeps/linux/h8300/ptrace.c
  28. 2 2
      libc/sysdeps/linux/hppa/bits/syscalls.h
  29. 2 2
      libc/sysdeps/linux/hppa/syscall.c
  30. 1 1
      libc/sysdeps/linux/i386/bits/atomic.h
  31. 1 1
      libc/sysdeps/linux/i386/bits/syscalls.h
  32. 1 1
      libc/sysdeps/linux/ia64/__syscall_error.c
  33. 9 9
      libc/sysdeps/linux/ia64/bits/syscalls.h
  34. 1 1
      libc/sysdeps/linux/m68k/__syscall_error.c
  35. 8 8
      libc/sysdeps/linux/m68k/bits/mathinline.h
  36. 1 1
      libc/sysdeps/linux/m68k/brk.c
  37. 1 1
      libc/sysdeps/linux/m68k/fpu_control.h
  38. 6 6
      libc/sysdeps/linux/microblaze/clone.c
  39. 9 9
      libc/sysdeps/linux/microblaze/syscall.c
  40. 1 1
      libc/sysdeps/linux/mips/brk.c
  41. 35 35
      libc/sysdeps/linux/mips/setjmp_aux.c
  42. 3 3
      libc/sysdeps/linux/mips/sigaction.c
  43. 3 3
      libc/sysdeps/linux/nios/brk.c
  44. 4 4
      libc/sysdeps/linux/nios2/brk.c
  45. 6 6
      libc/sysdeps/linux/nios2/clone.c
  46. 9 9
      libc/sysdeps/linux/nios2/syscall.c
  47. 4 4
      libc/sysdeps/linux/powerpc/bits/atomic.h
  48. 2 2
      libc/sysdeps/linux/powerpc/fpu_control.h
  49. 4 4
      libc/sysdeps/linux/sparc/bits/mathinline.h
  50. 1 1
      libc/sysdeps/linux/sparc/brk.c
  51. 6 6
      libc/sysdeps/linux/v850/clone.c
  52. 9 9
      libc/sysdeps/linux/v850/syscall.c
  53. 1 1
      libc/sysdeps/linux/x86_64/bits/atomic.h
  54. 7 7
      libc/sysdeps/linux/x86_64/bits/syscalls.h
  55. 1 1
      libc/sysdeps/linux/x86_64/brk.c
  56. 4 4
      libc/sysdeps/linux/x86_64/sigaction.c
  57. 2 2
      libc/sysdeps/linux/xtensa/bits/syscalls.h
  58. 2 2
      libm/powerpc/classic/s_ceil.c
  59. 2 2
      libm/powerpc/classic/s_floor.c
  60. 21 21
      libm/powerpc/classic/s_modf.c
  61. 2 2
      libm/powerpc/classic/s_nearbyint.c
  62. 3 3
      libm/powerpc/classic/s_round.c
  63. 2 2
      libm/powerpc/classic/s_trunc.c
  64. 2 2
      libm/powerpc/e500/fpu/fenv_libc.h
  65. 5 5
      libm/powerpc/e500/spe-raise.c
  66. 1 1
      libpthread/linuxthreads.old/sysdeps/bfin/pt-machine.h
  67. 2 2
      libpthread/linuxthreads.old/sysdeps/frv/pt-machine.h
  68. 4 4
      libpthread/linuxthreads.old/sysdeps/i386/tls.h
  69. 6 6
      libpthread/linuxthreads.old/sysdeps/i386/useldt.h
  70. 1 1
      libpthread/linuxthreads.old/sysdeps/sh/tls.h
  71. 1 1
      libpthread/linuxthreads.old/sysdeps/x86_64/tls.h
  72. 1 1
      test/math/libm-test.inc

+ 1 - 1
include/atomic.h

@@ -240,7 +240,7 @@
 
 
 
 
 #ifndef atomic_full_barrier
 #ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
+# define atomic_full_barrier() __asm__ ("" ::: "memory")
 #endif
 #endif
 
 
 
 

+ 5 - 5
ldso/ldso/arm/dl-sysdep.h

@@ -77,14 +77,14 @@ elf_machine_dynamic (void)
 {
 {
   Elf32_Addr dynamic;
   Elf32_Addr dynamic;
 #if !defined __thumb__
 #if !defined __thumb__
-  asm ("ldr %0, 2f\n"
+  __asm__ ("ldr %0, 2f\n"
        "1: ldr %0, [pc, %0]\n"
        "1: ldr %0, [pc, %0]\n"
        "b 3f\n"
        "b 3f\n"
        "2: .word _GLOBAL_OFFSET_TABLE_ - (1b+8)\n"
        "2: .word _GLOBAL_OFFSET_TABLE_ - (1b+8)\n"
        "3:" : "=r" (dynamic));
        "3:" : "=r" (dynamic));
 #else
 #else
   int tmp;
   int tmp;
-  asm (".align 2\n"
+  __asm__ (".align 2\n"
        "bx     pc\n"
        "bx     pc\n"
        "nop\n"
        "nop\n"
        ".arm\n"
        ".arm\n"
@@ -107,16 +107,16 @@ elf_machine_dynamic (void)
 static inline Elf32_Addr __attribute__ ((unused))
 static inline Elf32_Addr __attribute__ ((unused))
 elf_machine_load_address (void)
 elf_machine_load_address (void)
 {
 {
-	extern void __dl_start asm ("_dl_start");
+	extern void __dl_start __asm__ ("_dl_start");
 	Elf32_Addr got_addr = (Elf32_Addr) &__dl_start;
 	Elf32_Addr got_addr = (Elf32_Addr) &__dl_start;
 	Elf32_Addr pcrel_addr;
 	Elf32_Addr pcrel_addr;
 #if defined __OPTIMIZE__ && !defined __thumb__
 #if defined __OPTIMIZE__ && !defined __thumb__
-	asm ("adr %0, _dl_start" : "=r" (pcrel_addr));
+	__asm__ ("adr %0, _dl_start" : "=r" (pcrel_addr));
 #else
 #else
 	/* A simple adr does not work in Thumb mode because the offset is
 	/* A simple adr does not work in Thumb mode because the offset is
 	   negative, and for debug builds may be too large.  */
 	   negative, and for debug builds may be too large.  */
 	int tmp;
 	int tmp;
-	asm ("adr %1, 1f\n\t"
+	__asm__ ("adr %1, 1f\n\t"
 		 "ldr %0, [%1]\n\t"
 		 "ldr %0, [%1]\n\t"
 		 "add %0, %0, %1\n\t"
 		 "add %0, %0, %1\n\t"
 		 "b 2f\n\t"
 		 "b 2f\n\t"

+ 1 - 1
ldso/ldso/bfin/dl-inlines.h

@@ -422,7 +422,7 @@ _dl_lookup_address (void const *address)
   struct funcdesc_value const *fd;
   struct funcdesc_value const *fd;
 
 
   /* Make sure we don't make assumptions about its alignment.  */
   /* Make sure we don't make assumptions about its alignment.  */
-  asm ("" : "+r" (address));
+  __asm__ ("" : "+r" (address));
 
 
   if ((Elf32_Addr)address & 7)
   if ((Elf32_Addr)address & 7)
     /* It's not a function descriptor.  */
     /* It's not a function descriptor.  */

+ 1 - 1
ldso/ldso/bfin/dl-syscalls.h

@@ -67,7 +67,7 @@ _dl_mmap(__ptr_t addr, size_t len, int prot, int flags, int fd, __off_t offset)
       if (! _dl_mmap_base)
       if (! _dl_mmap_base)
 	{
 	{
 	  void *stack;
 	  void *stack;
-	  asm ("mov sp, %0" : "=r" (stack));
+	  __asm__ ("mov sp, %0" : "=r" (stack));
 	  _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE);
 	  _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE);
 	retry:
 	retry:
 	  if (((void **)_dl_mmap_base)[0] == _dl_mmap_base
 	  if (((void **)_dl_mmap_base)[0] == _dl_mmap_base

+ 3 - 3
ldso/ldso/bfin/dl-sysdep.h

@@ -80,7 +80,7 @@ struct funcdesc_ht;
     do {								\
     do {								\
 	static const char __attribute__((section(".text"))) __s[] = (S); \
 	static const char __attribute__((section(".text"))) __s[] = (S); \
       const char *__p, *__scratch;					\
       const char *__p, *__scratch;					\
-      asm ("call 1f;\n1:\n\t"						\
+      __asm__ ("call 1f;\n1:\n\t"						\
 	   "%1 = RETS;\n\t"						\
 	   "%1 = RETS;\n\t"						\
 	   "%0 = [%3 + 1b@GOT17M4];\n\t"				\
 	   "%0 = [%3 + 1b@GOT17M4];\n\t"				\
 	   "%1 = %1 - %0;\n\t"						\
 	   "%1 = %1 - %0;\n\t"						\
@@ -89,7 +89,7 @@ struct funcdesc_ht;
 	   : "d" (__s), "a" (dl_boot_got_pointer) : "RETS");				\
 	   : "d" (__s), "a" (dl_boot_got_pointer) : "RETS");				\
       SEND_STDERR (__p);						\
       SEND_STDERR (__p);						\
       {	int __t;							\
       {	int __t;							\
-	  for (__t = 0; __t < 0x1000000; __t++) asm volatile ("");	} \
+	  for (__t = 0; __t < 0x1000000; __t++) __asm__ __volatile__ ("");	} \
   } while (0)
   } while (0)
 
 
 #define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr
 #define DL_LOADADDR_TYPE struct elf32_fdpic_loadaddr
@@ -101,7 +101,7 @@ struct funcdesc_ht;
   ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value))
   ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value))
 
 
 #define _dl_stabilize_funcdesc(val) \
 #define _dl_stabilize_funcdesc(val) \
-  ({ asm ("" : "+m" (*(val))); (val); })
+  ({ __asm__ ("" : "+m" (*(val))); (val); })
 
 
 #define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \
 #define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \
   ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \
   ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \

+ 1 - 1
ldso/ldso/frv/dl-inlines.h

@@ -406,7 +406,7 @@ _dl_lookup_address (void const *address)
   struct funcdesc_value const *fd;
   struct funcdesc_value const *fd;
 
 
   /* Make sure we don't make assumptions about its alignment.  */
   /* Make sure we don't make assumptions about its alignment.  */
-  asm ("" : "+r" (address));
+  __asm__ ("" : "+r" (address));
 
 
   if ((Elf32_Addr)address & 7)
   if ((Elf32_Addr)address & 7)
     /* It's not a function descriptor.  */
     /* It's not a function descriptor.  */

+ 1 - 1
ldso/ldso/frv/dl-syscalls.h

@@ -53,7 +53,7 @@ _dl_mmap(__ptr_t addr, size_t len, int prot, int flags, int fd, __off_t offset)
       if (! _dl_mmap_base)
       if (! _dl_mmap_base)
 	{
 	{
 	  void *stack;
 	  void *stack;
-	  asm ("mov sp, %0" : "=r" (stack));
+	  __asm__ ("mov sp, %0" : "=r" (stack));
 	  _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE);
 	  _dl_mmap_base = (void *)(((long)stack + 2 * PAGE_SIZE) & -PAGE_SIZE);
 	retry:
 	retry:
 	  if (((void **)_dl_mmap_base)[0] == _dl_mmap_base
 	  if (((void **)_dl_mmap_base)[0] == _dl_mmap_base

+ 1 - 1
ldso/ldso/frv/dl-sysdep.h

@@ -76,7 +76,7 @@ struct funcdesc_ht;
   ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value))
   ((void(*)(void)) _dl_funcdesc_for ((void*)(ADDR), (LOADADDR).got_value))
 
 
 #define _dl_stabilize_funcdesc(val) \
 #define _dl_stabilize_funcdesc(val) \
-  ({ asm ("" : "+m" (*(val))); (val); })
+  ({ __asm__ ("" : "+m" (*(val))); (val); })
 
 
 #define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \
 #define DL_CALL_FUNC_AT_ADDR(ADDR, LOADADDR, SIGNATURE, ...) \
   ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \
   ({ struct funcdesc_value fd = { (void*)(ADDR), (LOADADDR).got_value }; \

+ 2 - 2
ldso/ldso/frv/elfinterp.c

@@ -172,7 +172,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 
 
 	reloc_addr   = (unsigned long *)(intptr_t)
 	reloc_addr   = (unsigned long *)(intptr_t)
 	  DL_RELOC_ADDR (rpnt->r_offset, tpnt->loadaddr);
 	  DL_RELOC_ADDR (rpnt->r_offset, tpnt->loadaddr);
-	asm ("" : "=r" (reloc_addr_packed) : "0" (reloc_addr));
+	__asm__ ("" : "=r" (reloc_addr_packed) : "0" (reloc_addr));
 	reloc_type   = ELF32_R_TYPE(rpnt->r_info);
 	reloc_type   = ELF32_R_TYPE(rpnt->r_info);
 	symtab_index = ELF32_R_SYM(rpnt->r_info);
 	symtab_index = ELF32_R_SYM(rpnt->r_info);
 	symbol_addr  = 0;
 	symbol_addr  = 0;
@@ -238,7 +238,7 @@ _dl_do_reloc (struct elf_resolve *tpnt,struct dyn_elf *scope,
 				= symbol_tpnt->loadaddr.got_value;
 				= symbol_tpnt->loadaddr.got_value;
 		else
 		else
 			funcval.got_value = 0;
 			funcval.got_value = 0;
-		asm ("std%I0\t%1, %M0"
+		__asm__ ("std%I0\t%1, %M0"
 		     : "=m" (*(struct funcdesc_value *)reloc_addr)
 		     : "=m" (*(struct funcdesc_value *)reloc_addr)
 		     : "e" (funcval));
 		     : "e" (funcval));
 		break;
 		break;

+ 2 - 2
ldso/ldso/m68k/dl-sysdep.h

@@ -44,7 +44,7 @@ extern unsigned long _dl_linux_resolver (struct elf_resolve *, int);
 static inline Elf32_Addr
 static inline Elf32_Addr
 elf_machine_dynamic (void)
 elf_machine_dynamic (void)
 {
 {
-	register Elf32_Addr *got asm ("%a5");
+	register Elf32_Addr *got __asm__ ("%a5");
 	return *got;
 	return *got;
 }
 }
 
 
@@ -54,7 +54,7 @@ static inline Elf32_Addr
 elf_machine_load_address (void)
 elf_machine_load_address (void)
 {
 {
 	Elf32_Addr addr;
 	Elf32_Addr addr;
-	asm ("lea _dl_start(%%pc), %0\n\t"
+	__asm__ ("lea _dl_start(%%pc), %0\n\t"
 	     "sub.l _dl_start@GOT.w(%%a5), %0"
 	     "sub.l _dl_start@GOT.w(%%a5), %0"
 	     : "=a" (addr));
 	     : "=a" (addr));
 	return addr;
 	return addr;

+ 1 - 1
ldso/ldso/mips/dl-sysdep.h

@@ -196,7 +196,7 @@ static inline ElfW(Addr)
 elf_machine_load_address (void)
 elf_machine_load_address (void)
 {
 {
 	ElfW(Addr) addr;
 	ElfW(Addr) addr;
-	asm ("        .set noreorder\n"
+	__asm__ ("        .set noreorder\n"
 	     "        " STRINGXP (PTR_LA) " %0, 0f\n"
 	     "        " STRINGXP (PTR_LA) " %0, 0f\n"
 	     "        bltzal $0, 0f\n"
 	     "        bltzal $0, 0f\n"
 	     "        nop\n"
 	     "        nop\n"

+ 9 - 9
ldso/ldso/powerpc/dl-sysdep.h

@@ -50,11 +50,11 @@
 #define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
 #define OPCODE_SLWI(ra,rs,sh) OPCODE_RLWINM(ra,rs,sh,0,31-sh)
 
 
 
 
-#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
-#define PPC_SYNC asm volatile ("sync" : : : "memory")
-#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
-#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
-#define PPC_DIE asm volatile ("tweq 0,0")
+#define PPC_DCBST(where) __asm__ __volatile__ ("dcbst 0,%0" : : "r"(where) : "memory")
+#define PPC_SYNC __asm__ __volatile__ ("sync" : : : "memory")
+#define PPC_ISYNC __asm__ __volatile__ ("sync; isync" : : : "memory")
+#define PPC_ICBI(where) __asm__ __volatile__ ("icbi 0,%0" : : "r"(where) : "memory")
+#define PPC_DIE __asm__ __volatile__ ("tweq 0,0")
 
 
 /* Here we define the magic numbers that this dynamic loader should accept */
 /* Here we define the magic numbers that this dynamic loader should accept */
 
 
@@ -95,13 +95,13 @@ ppc_got (void)
 {
 {
 	Elf32_Addr *got;
 	Elf32_Addr *got;
 #ifdef HAVE_ASM_PPC_REL16
 #ifdef HAVE_ASM_PPC_REL16
-	asm ("	bcl 20,31,1f\n"
+	__asm__ ("	bcl 20,31,1f\n"
 	     "1:mflr %0\n"
 	     "1:mflr %0\n"
 	     "	addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
 	     "	addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
 	     "	addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
 	     "	addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
 	     : "=b" (got) : : "lr");
 	     : "=b" (got) : : "lr");
 #else
 #else
-	asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
+	__asm__ (" bl _GLOBAL_OFFSET_TABLE_-4@local"
 	     : "=l" (got));
 	     : "=l" (got));
 #endif
 #endif
 	return got;
 	return got;
@@ -130,7 +130,7 @@ elf_machine_load_address (void)
        I think this is so that machines that do bl/blr pairing don't
        I think this is so that machines that do bl/blr pairing don't
        get confused.
        get confused.
 
 
-     asm ("bcl 20,31,0f ;"
+     __asm__ ("bcl 20,31,0f ;"
 	  "0: mflr 0 ;"
 	  "0: mflr 0 ;"
 	  "lis %0,0b@ha;"
 	  "lis %0,0b@ha;"
 	  "addi %0,%0,0b@l;"
 	  "addi %0,%0,0b@l;"
@@ -151,7 +151,7 @@ elf_machine_load_address (void)
      the address ourselves. That gives us the following code: */
      the address ourselves. That gives us the following code: */
 
 
   /* Get address of the 'b _DYNAMIC@local'...  */
   /* Get address of the 'b _DYNAMIC@local'...  */
-  asm ("bcl 20,31,0f;"
+  __asm__ ("bcl 20,31,0f;"
        "b _DYNAMIC@local;"
        "b _DYNAMIC@local;"
        "0:"
        "0:"
        : "=l"(branchaddr));
        : "=l"(branchaddr));

+ 5 - 5
ldso/ldso/sparc/dl-sysdep.h

@@ -109,8 +109,8 @@ sparc_mod(unsigned long m, unsigned long p)
    invoked from functions that have no GOT references, and thus the compiler
    invoked from functions that have no GOT references, and thus the compiler
    has no obligation to load the PIC register.  */
    has no obligation to load the PIC register.  */
 #define LOAD_PIC_REG(PIC_REG)   \
 #define LOAD_PIC_REG(PIC_REG)   \
-do {    register Elf32_Addr pc __asm("o7"); \
-        __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
+do {    register Elf32_Addr pc __asm__("o7"); \
+        __asm__("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
               "call 1f\n\t" \
               "call 1f\n\t" \
               "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
               "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
               "1:\tadd %1, %0, %1" \
               "1:\tadd %1, %0, %1" \
@@ -123,7 +123,7 @@ do {    register Elf32_Addr pc __asm("o7"); \
 static inline Elf32_Addr
 static inline Elf32_Addr
 elf_machine_dynamic (void)
 elf_machine_dynamic (void)
 {
 {
-	register Elf32_Addr *got asm ("%l7");
+	register Elf32_Addr *got __asm__ ("%l7");
 	
 	
 	LOAD_PIC_REG (got);
 	LOAD_PIC_REG (got);
 	
 	
@@ -134,9 +134,9 @@ elf_machine_dynamic (void)
 static inline Elf32_Addr
 static inline Elf32_Addr
 elf_machine_load_address (void)
 elf_machine_load_address (void)
 {
 {
-	register Elf32_Addr *pc __asm ("%o7"), *got __asm ("%l7");
+	register Elf32_Addr *pc __asm__ ("%o7"), *got __asm ("%l7");
 	
 	
-	__asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
+	__asm__ ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
 	       "call 1f\n\t"
 	       "call 1f\n\t"
 	       " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
 	       " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
 	       "call _DYNAMIC\n\t"
 	       "call _DYNAMIC\n\t"

+ 1 - 1
libc/string/cris/memcpy.c

@@ -130,7 +130,7 @@ void *memcpy(void *pdst,
      here (beware: they may be moved to temporary registers).
      here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       This way, we do not have to save/move the registers around into
      temporaries; we can safely use them straight away.  */
      temporaries; we can safely use them straight away.  */
-    __asm__ volatile ("\
+    __asm__ __volatile__ ("\
 	.syntax no_register_prefix					\n\
 	.syntax no_register_prefix					\n\
 									\n\
 									\n\
         ;; Check that the register asm declaration got right.		\n\
         ;; Check that the register asm declaration got right.		\n\

+ 1 - 1
libc/string/cris/memset.c

@@ -124,7 +124,7 @@ void *memset(void *pdst,
      here (beware: they may be moved to temporary registers).
      here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       This way, we do not have to save/move the registers around into
      temporaries; we can safely use them straight away.  */
      temporaries; we can safely use them straight away.  */
-    __asm__ volatile ("								\n\
+    __asm__ __volatile__ ("								\n\
 	.syntax no_register_prefix						\n\
 	.syntax no_register_prefix						\n\
 										\n\
 										\n\
         ;; Check that the register asm declaration got right.			\n\
         ;; Check that the register asm declaration got right.			\n\

+ 1 - 1
libc/string/powerpc/memset.c

@@ -28,7 +28,7 @@ static inline int expand_byte_word(int c){
 	   c = c << 8 | c;
 	   c = c << 8 | c;
 	   c = c << 16 | c ;
 	   c = c << 16 | c ;
 	*/
 	*/
-	asm("rlwimi	%0,%0,8,16,23\n"
+	__asm__("rlwimi	%0,%0,8,16,23\n"
 	    "\trlwimi	%0,%0,16,0,15\n"
 	    "\trlwimi	%0,%0,16,0,15\n"
 	    : "=r" (c) : "0" (c));
 	    : "=r" (c) : "0" (c));
 	return c;
 	return c;

+ 3 - 3
libc/sysdeps/linux/alpha/bits/atomic.h

@@ -363,7 +363,7 @@ typedef uintmax_t uatomic_max_t;
 */
 */
 
 
 #ifndef UP
 #ifndef UP
-# define atomic_full_barrier()	__asm ("mb" : : : "memory");
-# define atomic_read_barrier()	__asm ("mb" : : : "memory");
-# define atomic_write_barrier()	__asm ("wmb" : : : "memory");
+# define atomic_full_barrier()	__asm__ ("mb" : : : "memory");
+# define atomic_read_barrier()	__asm__ ("mb" : : : "memory");
+# define atomic_write_barrier()	__asm__ ("wmb" : : : "memory");
 #endif
 #endif

+ 5 - 5
libc/sysdeps/linux/alpha/bits/mathinline.h

@@ -38,7 +38,7 @@
 # define isunordered(u, v)				\
 # define isunordered(u, v)				\
   (__extension__					\
   (__extension__					\
    ({ double __r, __u = (u), __v = (v);			\
    ({ double __r, __u = (u), __v = (v);			\
-      __asm ("cmptun/su %1,%2,%0\n\ttrapb"		\
+      __asm__ ("cmptun/su %1,%2,%0\n\ttrapb"		\
 	     : "=&f" (__r) : "f" (__u), "f"(__v));	\
 	     : "=&f" (__r) : "f" (__u), "f"(__v));	\
       __r != 0; }))
       __r != 0; }))
 #endif /* ISO C99 */
 #endif /* ISO C99 */
@@ -52,7 +52,7 @@ __MATH_INLINE TYPE							\
 __NTH (NAME (TYPE __x, TYPE __y))					\
 __NTH (NAME (TYPE __x, TYPE __y))					\
 {									\
 {									\
   TYPE __z;								\
   TYPE __z;								\
-  __asm ("cpys %1, %2, %0" : "=f" (__z) : "f" (__y), "f" (__x));	\
+  __asm__ ("cpys %1, %2, %0" : "=f" (__z) : "f" (__y), "f" (__x));	\
   return __z;								\
   return __z;								\
 }
 }
 
 
@@ -71,7 +71,7 @@ __MATH_INLINE TYPE					\
 __NTH (NAME (TYPE __x))					\
 __NTH (NAME (TYPE __x))					\
 {							\
 {							\
   TYPE __z;						\
   TYPE __z;						\
-  __asm ("cpys $f31, %1, %0" : "=f" (__z) : "f" (__x));	\
+  __asm__ ("cpys $f31, %1, %0" : "=f" (__z) : "f" (__x));	\
   return __z;						\
   return __z;						\
 }
 }
 
 
@@ -101,7 +101,7 @@ __NTH (__floorf (float __x))
 
 
       float __tmp1, __tmp2;
       float __tmp1, __tmp2;
 
 
-      __asm ("cvtst/s %3,%2\n\t"
+      __asm__ ("cvtst/s %3,%2\n\t"
 #ifdef _IEEE_FP_INEXACT
 #ifdef _IEEE_FP_INEXACT
 	     "cvttq/svim %2,%1\n\t"
 	     "cvttq/svim %2,%1\n\t"
 #else
 #else
@@ -120,7 +120,7 @@ __NTH (__floor (double __x))
   if (__x != 0 && fabs (__x) < 9007199254740992.0)  /* 1 << DBL_MANT_DIG */
   if (__x != 0 && fabs (__x) < 9007199254740992.0)  /* 1 << DBL_MANT_DIG */
     {
     {
       double __tmp1;
       double __tmp1;
-      __asm (
+      __asm__ (
 #ifdef _IEEE_FP_INEXACT
 #ifdef _IEEE_FP_INEXACT
 	     "cvttq/svim %2,%1\n\t"
 	     "cvttq/svim %2,%1\n\t"
 #else
 #else

+ 1 - 1
libc/sysdeps/linux/cris/__init_brk.c

@@ -15,7 +15,7 @@ __init_brk (void)
 	    /* Notice that we don't need to save/restore the GOT
 	    /* Notice that we don't need to save/restore the GOT
 	     * register since that is not call clobbered by the syscall.
 	     * register since that is not call clobbered by the syscall.
 	     */
 	     */
-	    asm ("clear.d $r10\n\t"
+	    __asm__ ("clear.d $r10\n\t"
 		 "movu.w " STR(__NR_brk) ",$r9\n\t"
 		 "movu.w " STR(__NR_brk) ",$r9\n\t"
 		 "break 13\n\t"
 		 "break 13\n\t"
 		 "move.d $r10, %0"
 		 "move.d $r10, %0"

+ 3 - 3
libc/sysdeps/linux/cris/bits/syscalls.h

@@ -66,11 +66,11 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
 #define INLINE_SYSCALL(name, nr, args...)	\
 #define INLINE_SYSCALL(name, nr, args...)	\
   ({						\
   ({						\
      unsigned long __sys_res;			\
      unsigned long __sys_res;			\
-     register unsigned long __res asm ("r10");	\
+     register unsigned long __res __asm__ ("r10");	\
      LOAD_ARGS_c_##nr (args)			\
      LOAD_ARGS_c_##nr (args)			\
-     register unsigned long __callno asm ("r9")	\
+     register unsigned long __callno __asm__ ("r9")	\
        = SYS_ify (name);			\
        = SYS_ify (name);			\
-     asm volatile (LOAD_ARGS_asm_##nr (args)	\
+     __asm__ __volatile__ (LOAD_ARGS_asm_##nr (args)	\
 		   CHECK_ARGS_asm_##nr		\
 		   CHECK_ARGS_asm_##nr		\
 		   "break 13"			\
 		   "break 13"			\
 		   : "=r" (__res)		\
 		   : "=r" (__res)		\

+ 1 - 1
libc/sysdeps/linux/cris/brk.c

@@ -20,7 +20,7 @@ int brk(void * end_data_seg)
 		 * Notice that we don't need to save/restore the GOT
 		 * Notice that we don't need to save/restore the GOT
 		 * register since that is not call clobbered by the syscall.
 		 * register since that is not call clobbered by the syscall.
 		 */
 		 */
-		asm ("move.d %1,$r10\n\t"
+		__asm__ ("move.d %1,$r10\n\t"
 		     "movu.w " STR(__NR_brk) ",$r9\n\t"
 		     "movu.w " STR(__NR_brk) ",$r9\n\t"
 		     "break 13\n\t"
 		     "break 13\n\t"
 		     "move.d $r10, %0"
 		     "move.d $r10, %0"

+ 1 - 1
libc/sysdeps/linux/cris/sbrk.c

@@ -24,7 +24,7 @@ sbrk(intptr_t increment)
 		 * Notice that we don't need to save/restore the GOT
 		 * Notice that we don't need to save/restore the GOT
 		 * register since that is not call clobbered by the syscall.
 		 * register since that is not call clobbered by the syscall.
 		 */
 		 */
-		asm ("move.d %1,$r10\n\t"
+		__asm__ ("move.d %1,$r10\n\t"
 		     "movu.w " STR(__NR_brk) ",$r9\n\t"
 		     "movu.w " STR(__NR_brk) ",$r9\n\t"
 		     "break 13\n\t"
 		     "break 13\n\t"
 		     "move.d $r10, %0"
 		     "move.d $r10, %0"

+ 18 - 18
libc/sysdeps/linux/e1/bits/fenvinline.h

@@ -57,7 +57,7 @@
 #define fegetround()                     \
 #define fegetround()                     \
 ({                                       \
 ({                                       \
 	unsigned int tmp;                \
 	unsigned int tmp;                \
-	asm volatile("mov %0, SR"        \
+	__asm__ __volatile__("mov %0, SR"        \
 			:"=l"(tmp)       \
 			:"=l"(tmp)       \
 			:/*no input*/);  \
 			:/*no input*/);  \
 	tmp &= (3<<13);                  \
 	tmp &= (3<<13);                  \
@@ -70,7 +70,7 @@
 	unsigned int tmp = (3 << 13);    \
 	unsigned int tmp = (3 << 13);    \
 	while(1) {                       \
 	while(1) {                       \
 	/* Clear SR.FRM field */         \
 	/* Clear SR.FRM field */         \
-	asm volatile("andn SR, %0"       \
+	__asm__ __volatile__("andn SR, %0"       \
 			:/*no output*/   \
 			:/*no output*/   \
 			:"l"(tmp) );     \
 			:"l"(tmp) );     \
 	tmp &= round;                    \
 	tmp &= round;                    \
@@ -80,7 +80,7 @@
 		break;                   \
 		break;                   \
 	}                                \
 	}                                \
                                          \
                                          \
-	asm volatile("or SR, %0"         \
+	__asm__ __volatile__("or SR, %0"         \
 			:/*no input*/    \
 			:/*no input*/    \
 			:"l"(round) );   \
 			:"l"(round) );   \
 	tmp = 0;                         \
 	tmp = 0;                         \
@@ -100,7 +100,7 @@ static inline feclearexcept(int __excepts)
 	if( __excepts & (~0x1F00) )
 	if( __excepts & (~0x1F00) )
 		return -1;
 		return -1;
 
 
-	asm volatile("mov %0, SR"
+	__asm__ __volatile__("mov %0, SR"
 		     :"=l"(enabled_excepts)
 		     :"=l"(enabled_excepts)
 		     :/*no input*/ ); 
 		     :/*no input*/ ); 
 
 
@@ -112,7 +112,7 @@ static inline feclearexcept(int __excepts)
 	disabled_excepts &= __excepts;
 	disabled_excepts &= __excepts;
 
 
 	/* Clear accrued exceptions */
 	/* Clear accrued exceptions */
-	asm volatile("andn G2, %0\n\t"
+	__asm__ __volatile__("andn G2, %0\n\t"
 		     "andn G2, %1\n\t"
 		     "andn G2, %1\n\t"
 			:/*no output*/
 			:/*no output*/
 			:"l"(enabled_excepts),
 			:"l"(enabled_excepts),
@@ -133,7 +133,7 @@ inline int fetestexcept(int __excepts)
 	if( __excepts & (~0x1F00) )
 	if( __excepts & (~0x1F00) )
 		return -1;
 		return -1;
 
 
-	asm volatile("mov %0, SR"
+	__asm__ __volatile__("mov %0, SR"
 		     :"=l"(enabled_excepts)
 		     :"=l"(enabled_excepts)
 		     :/*no input*/ ); 
 		     :/*no input*/ ); 
 
 
@@ -141,7 +141,7 @@ inline int fetestexcept(int __excepts)
 	disabled_excepts = ~enabled_excepts;
 	disabled_excepts = ~enabled_excepts;
 	disabled_excepts &= 0x1F00;
 	disabled_excepts &= 0x1F00;
 
 
- 	asm volatile("mov %0, G2"
+ 	__asm__ __volatile__("mov %0, G2"
 		    :"=l"(G2)
 		    :"=l"(G2)
 		    :/*no input*/ );
 		    :/*no input*/ );
 
 
@@ -154,7 +154,7 @@ inline int fetestexcept(int __excepts)
 
 
 static inline int feraiseexcept(int __excepts)
 static inline int feraiseexcept(int __excepts)
 {
 {
-	asm volatile("or G2, %0"
+	__asm__ __volatile__("or G2, %0"
 			:/*no output*/
 			:/*no output*/
 			:"l"( __excepts >> 8  ) );
 			:"l"( __excepts >> 8  ) );
 	return 0;
 	return 0;
@@ -169,7 +169,7 @@ static inline int feraiseexcept(int __excepts)
 	int __tmpexcepts = __excepts;      \
 	int __tmpexcepts = __excepts;      \
                                            \
                                            \
 	while(1) {                         \
 	while(1) {                         \
-	    asm volatile("mov %0, SR"      \
+	    __asm__ __volatile__("mov %0, SR"      \
 		     :"=l"(__pexcepts)     \
 		     :"=l"(__pexcepts)     \
 		     :/*no input*/ );      \
 		     :/*no input*/ );      \
 	    __pexcepts &= 0x1F00;          \
 	    __pexcepts &= 0x1F00;          \
@@ -181,7 +181,7 @@ static inline int feraiseexcept(int __excepts)
 	        break;                     \
 	        break;                     \
 	    }                              \
 	    }                              \
 	                                   \
 	                                   \
-	    asm volatile("or SR, %0"       \
+	    __asm__ __volatile__("or SR, %0"       \
 			:/*no output*/     \
 			:/*no output*/     \
 			:"l"(__tmpexcepts) ); \
 			:"l"(__tmpexcepts) ); \
 	    __retval = __pexcepts;         \
 	    __retval = __pexcepts;         \
@@ -197,7 +197,7 @@ static inline int feraiseexcept(int __excepts)
 	int __tmpexcepts = __excepts;      \
 	int __tmpexcepts = __excepts;      \
 	                                   \
 	                                   \
 	while(1) {                         \
 	while(1) {                         \
-	    asm volatile("mov %0, SR"      \
+	    __asm__ __volatile__("mov %0, SR"      \
 		     :"=l"(__pexcepts)     \
 		     :"=l"(__pexcepts)     \
 		     :/*no input*/ );      \
 		     :/*no input*/ );      \
 	    __pexcepts &= 0x1F00;          \
 	    __pexcepts &= 0x1F00;          \
@@ -209,7 +209,7 @@ static inline int feraiseexcept(int __excepts)
 	        break;                     \
 	        break;                     \
 	    }                              \
 	    }                              \
 	                                   \
 	                                   \
-	    asm volatile("andn SR, %0"     \
+	    __asm__ __volatile__("andn SR, %0"     \
 			:/*no output*/     \
 			:/*no output*/     \
 			:"l"(__tmpexcepts) ); \
 			:"l"(__tmpexcepts) ); \
 	    __retval = __pexcepts;         \
 	    __retval = __pexcepts;         \
@@ -221,7 +221,7 @@ static inline int feraiseexcept(int __excepts)
 static inline int fegetexcept(int excepts)
 static inline int fegetexcept(int excepts)
 {
 {
 	unsigned int tmp;
 	unsigned int tmp;
-	asm volatile("mov %0, SR"
+	__asm__ __volatile__("mov %0, SR"
 		    :"=l"(tmp)
 		    :"=l"(tmp)
 		    :/*no input*/ );
 		    :/*no input*/ );
 	tmp &= 0x1F00;
 	tmp &= 0x1F00;
@@ -230,7 +230,7 @@ static inline int fegetexcept(int excepts)
 
 
 static inline int fegetenv(fenv_t *envp)
 static inline int fegetenv(fenv_t *envp)
 {
 {
-	asm volatile("mov %0, SR\n\t
+	__asm__ __volatile__("mov %0, SR\n\t
 		      mov %1, SR\n\t
 		      mov %1, SR\n\t
 		      mov %2, G2\n\t
 		      mov %2, G2\n\t
 		      mov %3, G2\n\t"
 		      mov %3, G2\n\t"
@@ -258,14 +258,14 @@ static inline int fegetenv(fenv_t *envp)
 ({                                                  \
 ({                                                  \
 	/* Clear FRM & FTE field of SR */           \
 	/* Clear FRM & FTE field of SR */           \
 	unsigned long clearSR = ( 127<<8 );         \
 	unsigned long clearSR = ( 127<<8 );         \
-	asm volatile("andn SR, %0\n\t"              \
+	__asm__ __volatile__("andn SR, %0\n\t"              \
 		     "or   SR, %1\n\t"              \
 		     "or   SR, %1\n\t"              \
 		     "or   SR, %2\n\t"              \
 		     "or   SR, %2\n\t"              \
 		     :/*no output*/                 \
 		     :/*no output*/                 \
 		     :"l"(clearSR),                 \
 		     :"l"(clearSR),                 \
 		      "l"(envp->round_mode),        \
 		      "l"(envp->round_mode),        \
 		      "l"(envp->trap_enabled) );    \
 		      "l"(envp->trap_enabled) );    \
-	asm volatile("andn G2, 0x1F1F\n\t"          \
+	__asm__ __volatile__("andn G2, 0x1F1F\n\t"          \
 		     "or   G2, %0\n\t"              \
 		     "or   G2, %0\n\t"              \
 		     "or   G2, %1\n\t"              \
 		     "or   G2, %1\n\t"              \
 		     :/*no output*/                 \
 		     :/*no output*/                 \
@@ -277,14 +277,14 @@ static inline int fegetenv(fenv_t *envp)
 #define feupdateenv(envp)                           \
 #define feupdateenv(envp)                           \
 ({                                                  \
 ({                                                  \
 	/* Clear FRM & FTE field of SR */           \
 	/* Clear FRM & FTE field of SR */           \
-	asm volatile(/* We dont clear the prev SR*/ \
+	__asm__ __volatile__(/* We dont clear the prev SR*/ \
 		     "or   SR, %1\n\t"              \
 		     "or   SR, %1\n\t"              \
 		     "or   SR, %2\n\t"              \
 		     "or   SR, %2\n\t"              \
 		     :/*no output*/                 \
 		     :/*no output*/                 \
 		     :"l"(clearSR),                 \
 		     :"l"(clearSR),                 \
 		      "l"(envp->round_mode),        \
 		      "l"(envp->round_mode),        \
 		      "l"(envp->accrued_except) );  \
 		      "l"(envp->accrued_except) );  \
-	asm volatile(/* We dont clear the prev SR*/ \
+	__asm__ __volatile__(/* We dont clear the prev SR*/ \
 		     "or   G2, %0\n\t"              \
 		     "or   G2, %0\n\t"              \
 		     "or   G2, %1\n\t"              \
 		     "or   G2, %1\n\t"              \
 		     :/*no output*/                 \
 		     :/*no output*/                 \

+ 2 - 2
libc/sysdeps/linux/e1/longjmp.c

@@ -27,7 +27,7 @@ void longjmp(jmp_buf state, int value )
 	e1newSP(state->__jmpbuf->SavedSP);
 	e1newSP(state->__jmpbuf->SavedSP);
 
 
 #define _state_ ((struct __jmp_buf_tag*)jmpbuf_ptr)
 #define _state_ ((struct __jmp_buf_tag*)jmpbuf_ptr)
-	asm volatile("mov L0, %0\n\t"
+	__asm__ __volatile__("mov L0, %0\n\t"
 		     "mov L1, %1\n\t"
 		     "mov L1, %1\n\t"
 		     "mov L2, %2\n\t"
 		     "mov L2, %2\n\t"
 		     "mov G3, %3\n\t"
 		     "mov G3, %3\n\t"
@@ -60,7 +60,7 @@ void siglongjmp(sigjmp_buf state, int value )
 	
 	
 
 
 #define _state_ ((struct __jmp_buf_tag*)jmpbuf_ptr)
 #define _state_ ((struct __jmp_buf_tag*)jmpbuf_ptr)
-	asm volatile("mov L0, %0\n\t"
+	__asm__ __volatile__("mov L0, %0\n\t"
 		     "mov L1, %1\n\t"
 		     "mov L1, %1\n\t"
 		     "mov L2, %2\n\t"
 		     "mov L2, %2\n\t"
 		     "mov G3, %3\n\t"
 		     "mov G3, %3\n\t"

+ 4 - 4
libc/sysdeps/linux/e1/setjmp.c

@@ -11,14 +11,14 @@ libc_hidden_proto(sigprocmask)
 
 
 int setjmp( jmp_buf state)
 int setjmp( jmp_buf state)
 {
 {
-	asm volatile(	"mov %0, G3\n\t"           
+	__asm__ __volatile__(	"mov %0, G3\n\t"           
 			"mov %1, G4\n\t" 
 			"mov %1, G4\n\t" 
 			:"=l"(state->__jmpbuf->G3), 
 			:"=l"(state->__jmpbuf->G3), 
 			 "=l"(state->__jmpbuf->G4) 
 			 "=l"(state->__jmpbuf->G4) 
 			:/*no input*/ 
 			:/*no input*/ 
 			:"%G3", "%G4" );
 			:"%G3", "%G4" );
 
 
-	asm volatile(   "setadr  %0\n\t"
+	__asm__ __volatile__(   "setadr  %0\n\t"
 			"mov %1, L1\n\t"
 			"mov %1, L1\n\t"
 			"mov %2, L2\n\t"
 			"mov %2, L2\n\t"
 			:"=l"(state->__jmpbuf->SavedSP),
 			:"=l"(state->__jmpbuf->SavedSP),
@@ -38,14 +38,14 @@ int sigsetjmp( sigjmp_buf state , int savesigs)
 	} else
 	} else
 		state->__mask_was_saved = 0;
 		state->__mask_was_saved = 0;
 
 
-	asm volatile(	"mov %0, G3\n\t"           
+	__asm__ __volatile__(	"mov %0, G3\n\t"           
 			"mov %1, G4\n\t" 
 			"mov %1, G4\n\t" 
 			:"=l"(state->__jmpbuf->G3), 
 			:"=l"(state->__jmpbuf->G3), 
 			 "=l"(state->__jmpbuf->G4) 
 			 "=l"(state->__jmpbuf->G4) 
 			:/*no input*/ 
 			:/*no input*/ 
 			:"%G3", "%G4" );
 			:"%G3", "%G4" );
 
 
-	asm volatile(   "setadr  %0\n\t"
+	__asm__ __volatile__(   "setadr  %0\n\t"
 			"mov %1, L2\n\t"
 			"mov %1, L2\n\t"
 			"mov %2, L3\n\t"
 			"mov %2, L3\n\t"
 			:"=l"(state->__jmpbuf->SavedSP),
 			:"=l"(state->__jmpbuf->SavedSP),

+ 1 - 1
libc/sysdeps/linux/h8300/brk.c

@@ -17,7 +17,7 @@ int brk (void *addr)
 {
 {
     void *newbrk;
     void *newbrk;
 
 
-    asm ("mov.l %2,er1\n\t"
+    __asm__ ("mov.l %2,er1\n\t"
 	 "mov.l %1,er0\n\t"
 	 "mov.l %1,er0\n\t"
 	 "trapa #0\n\t"
 	 "trapa #0\n\t"
 	 "mov.l er0,%0"
 	 "mov.l er0,%0"

+ 1 - 1
libc/sysdeps/linux/h8300/ptrace.c

@@ -11,7 +11,7 @@ ptrace(int request, int pid, int addr, int data)
 
 
 	if (request > 0 && request < 4) data = (int)&ret;
 	if (request > 0 && request < 4) data = (int)&ret;
 
 
-	__asm__ volatile ("sub.l er0,er0\n\t"
+	__asm__ __volatile__ ("sub.l er0,er0\n\t"
                           "mov.b %1,r0l\n\t"
                           "mov.b %1,r0l\n\t"
 			  "mov.l %2,er1\n\t"
 			  "mov.l %2,er1\n\t"
 			  "mov.l %3,er2\n\t"
 			  "mov.l %3,er2\n\t"

+ 2 - 2
libc/sysdeps/linux/hppa/bits/syscalls.h

@@ -48,10 +48,10 @@
 #define K_INLINE_SYSCALL(name, nr, args...)	({			\
 #define K_INLINE_SYSCALL(name, nr, args...)	({			\
 	long __sys_res;							\
 	long __sys_res;							\
 	{								\
 	{								\
-		register unsigned long __res asm("r28");		\
+		register unsigned long __res __asm__("r28");		\
 		K_LOAD_ARGS_##nr(args)					\
 		K_LOAD_ARGS_##nr(args)					\
 		/* FIXME: HACK stw/ldw r19 around syscall */		\
 		/* FIXME: HACK stw/ldw r19 around syscall */		\
-		asm volatile(						\
+		__asm__ __volatile__(						\
 			K_STW_ASM_PIC					\
 			K_STW_ASM_PIC					\
 			"	ble  0x100(%%sr2, %%r0)\n"		\
 			"	ble  0x100(%%sr2, %%r0)\n"		\
 			"	ldi %1, %%r20\n"			\
 			"	ldi %1, %%r20\n"			\

+ 2 - 2
libc/sysdeps/linux/hppa/syscall.c

@@ -45,9 +45,9 @@ syscall (long int __sysno, ...)
   va_end (args);
   va_end (args);
   
   
   {
   {
-    register unsigned long int __res asm("r28");
+    register unsigned long int __res __asm__("r28");
     K_LOAD_ARGS_6 (arg0, arg1, arg2, arg3, arg4, arg5)
     K_LOAD_ARGS_6 (arg0, arg1, arg2, arg3, arg4, arg5)
-    asm volatile (K_STW_ASM_PIC
+    __asm__ __volatile__ (K_STW_ASM_PIC
 		  "	ble  0x100(%%sr2, %%r0)	\n"
 		  "	ble  0x100(%%sr2, %%r0)	\n"
 		  "	copy %1, %%r20		\n"
 		  "	copy %1, %%r20		\n"
 		  K_LDW_ASM_PIC
 		  K_LDW_ASM_PIC

+ 1 - 1
libc/sysdeps/linux/i386/bits/atomic.h

@@ -366,4 +366,4 @@ typedef uintmax_t uatomic_max_t;
      __result; })
      __result; })
 
 
 
 
-#define atomic_delay() asm ("rep; nop")
+#define atomic_delay() __asm__ ("rep; nop")

+ 1 - 1
libc/sysdeps/linux/i386/bits/syscalls.h

@@ -165,7 +165,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
 #define INTERNAL_SYSCALL(name, err, nr, args...) \
 #define INTERNAL_SYSCALL(name, err, nr, args...) \
   ({                                                                          \
   ({                                                                          \
     register unsigned int resultvar;                                          \
     register unsigned int resultvar;                                          \
-    asm volatile (                                                            \
+    __asm__ __volatile__ (                                                            \
     LOADARGS_##nr                                                             \
     LOADARGS_##nr                                                             \
     "movl %1, %%eax\n\t"                                                      \
     "movl %1, %%eax\n\t"                                                      \
     "int $0x80\n\t"                                                           \
     "int $0x80\n\t"                                                           \

+ 1 - 1
libc/sysdeps/linux/ia64/__syscall_error.c

@@ -13,7 +13,7 @@
 int __syscall_error(void) attribute_hidden;
 int __syscall_error(void) attribute_hidden;
 int __syscall_error(void)
 int __syscall_error(void)
 {
 {
-	register int err_no asm("%r8");
+	register int err_no __asm__("%r8");
 	__set_errno(err_no);
 	__set_errno(err_no);
 	return -1;
 	return -1;
 }
 }

+ 9 - 9
libc/sysdeps/linux/ia64/bits/syscalls.h

@@ -40,9 +40,9 @@
 
 
 #define _DO_SYSCALL(name, nr, args...) \
 #define _DO_SYSCALL(name, nr, args...) \
     LOAD_ARGS_##nr (args) \
     LOAD_ARGS_##nr (args) \
-    register long _r8 asm ("r8"); \
-    register long _r10 asm ("r10"); \
-    register long _r15 asm ("r15") = SYS_ify(name); \
+    register long _r8 __asm__ ("r8"); \
+    register long _r10 __asm__ ("r10"); \
+    register long _r15 __asm__ ("r15") = SYS_ify(name); \
     long _retval; \
     long _retval; \
     LOAD_REGS_##nr \
     LOAD_REGS_##nr \
     __asm __volatile ("break " ___IA64_BREAK_SYSCALL ";;\n\t" \
     __asm __volatile ("break " ___IA64_BREAK_SYSCALL ";;\n\t" \
@@ -61,37 +61,37 @@
   long _arg1 = (long) (a1);				\
   long _arg1 = (long) (a1);				\
   LOAD_ARGS_0 ()
   LOAD_ARGS_0 ()
 #define LOAD_REGS_1					\
 #define LOAD_REGS_1					\
-  register long _out0 asm ("out0") = _arg1;		\
+  register long _out0 __asm__ ("out0") = _arg1;		\
   LOAD_REGS_0
   LOAD_REGS_0
 #define LOAD_ARGS_2(a1, a2)				\
 #define LOAD_ARGS_2(a1, a2)				\
   long _arg2 = (long) (a2);				\
   long _arg2 = (long) (a2);				\
   LOAD_ARGS_1 (a1)
   LOAD_ARGS_1 (a1)
 #define LOAD_REGS_2					\
 #define LOAD_REGS_2					\
-  register long _out1 asm ("out1") = _arg2;		\
+  register long _out1 __asm__ ("out1") = _arg2;		\
   LOAD_REGS_1
   LOAD_REGS_1
 #define LOAD_ARGS_3(a1, a2, a3)				\
 #define LOAD_ARGS_3(a1, a2, a3)				\
   long _arg3 = (long) (a3);				\
   long _arg3 = (long) (a3);				\
   LOAD_ARGS_2 (a1, a2)
   LOAD_ARGS_2 (a1, a2)
 #define LOAD_REGS_3					\
 #define LOAD_REGS_3					\
-  register long _out2 asm ("out2") = _arg3;		\
+  register long _out2 __asm__ ("out2") = _arg3;		\
   LOAD_REGS_2
   LOAD_REGS_2
 #define LOAD_ARGS_4(a1, a2, a3, a4)			\
 #define LOAD_ARGS_4(a1, a2, a3, a4)			\
   long _arg4 = (long) (a4);				\
   long _arg4 = (long) (a4);				\
   LOAD_ARGS_3 (a1, a2, a3)
   LOAD_ARGS_3 (a1, a2, a3)
 #define LOAD_REGS_4					\
 #define LOAD_REGS_4					\
-  register long _out3 asm ("out3") = _arg4;		\
+  register long _out3 __asm__ ("out3") = _arg4;		\
   LOAD_REGS_3
   LOAD_REGS_3
 #define LOAD_ARGS_5(a1, a2, a3, a4, a5)			\
 #define LOAD_ARGS_5(a1, a2, a3, a4, a5)			\
   long _arg5 = (long) (a5);				\
   long _arg5 = (long) (a5);				\
   LOAD_ARGS_4 (a1, a2, a3, a4)
   LOAD_ARGS_4 (a1, a2, a3, a4)
 #define LOAD_REGS_5					\
 #define LOAD_REGS_5					\
-  register long _out4 asm ("out4") = _arg5;		\
+  register long _out4 __asm__ ("out4") = _arg5;		\
   LOAD_REGS_4
   LOAD_REGS_4
 #define LOAD_ARGS_6(a1, a2, a3, a4, a5, a6)		\
 #define LOAD_ARGS_6(a1, a2, a3, a4, a5, a6)		\
   long _arg6 = (long) (a6);	    			\
   long _arg6 = (long) (a6);	    			\
   LOAD_ARGS_5 (a1, a2, a3, a4, a5)
   LOAD_ARGS_5 (a1, a2, a3, a4, a5)
 #define LOAD_REGS_6					\
 #define LOAD_REGS_6					\
-  register long _out5 asm ("out5") = _arg6;		\
+  register long _out5 __asm__ ("out5") = _arg6;		\
   LOAD_REGS_5
   LOAD_REGS_5
 
 
 #define ASM_OUTARGS_0
 #define ASM_OUTARGS_0

+ 1 - 1
libc/sysdeps/linux/m68k/__syscall_error.c

@@ -13,7 +13,7 @@
 int __syscall_error(void) attribute_hidden;
 int __syscall_error(void) attribute_hidden;
 int __syscall_error(void)
 int __syscall_error(void)
 {
 {
-	register int err_no asm("%d0");
+	register int err_no __asm__("%d0");
 	__set_errno(-err_no);
 	__set_errno(-err_no);
 	return -1;
 	return -1;
 }
 }

+ 8 - 8
libc/sysdeps/linux/m68k/bits/mathinline.h

@@ -121,7 +121,7 @@
   __m81_defun (float_type, func, (float_type __mathop_x))		      \
   __m81_defun (float_type, func, (float_type __mathop_x))		      \
   {									      \
   {									      \
     float_type __result;						      \
     float_type __result;						      \
-    __asm("f" __STRING(op) "%.x %1, %0" : "=f" (__result) : "f" (__mathop_x));\
+    __asm__("f" __STRING(op) "%.x %1, %0" : "=f" (__result) : "f" (__mathop_x));\
     return __result;							      \
     return __result;							      \
   }
   }
 
 
@@ -222,7 +222,7 @@ __m81_defun (int, __CONCAT(__isinf,s), (float_type __value))	  	  \
   /* There is no branch-condition for infinity,				  \
   /* There is no branch-condition for infinity,				  \
      so we must extract and examine the condition codes manually.  */	  \
      so we must extract and examine the condition codes manually.  */	  \
   unsigned long int __fpsr;						  \
   unsigned long int __fpsr;						  \
-  __asm("ftst%.x %1\n"							  \
+  __asm__("ftst%.x %1\n"							  \
 	"fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value));	  \
 	"fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value));	  \
   return (__fpsr & (2 << 24)) ? (__fpsr & (8 << 24) ? -1 : 1) : 0;	  \
   return (__fpsr & (2 << 24)) ? (__fpsr & (8 << 24) ? -1 : 1) : 0;	  \
 }									  \
 }									  \
@@ -232,7 +232,7 @@ __m81_defun (int, __CONCAT(__finite,s), (float_type __value))	  	  \
   /* There is no branch-condition for infinity, so we must extract and	  \
   /* There is no branch-condition for infinity, so we must extract and	  \
      examine the condition codes manually.  */				  \
      examine the condition codes manually.  */				  \
   unsigned long int __fpsr;						  \
   unsigned long int __fpsr;						  \
-  __asm ("ftst%.x %1\n"							  \
+  __asm__ ("ftst%.x %1\n"							  \
 	 "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value));	  \
 	 "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value));	  \
   return (__fpsr & (3 << 24)) == 0;					  \
   return (__fpsr & (3 << 24)) == 0;					  \
 }									  \
 }									  \
@@ -241,7 +241,7 @@ __m81_defun (float_type, __CONCAT(__scalbn,s),				  \
 	     (float_type __x, int __n))					  \
 	     (float_type __x, int __n))					  \
 {									  \
 {									  \
   float_type __result;							  \
   float_type __result;							  \
-  __asm ("fscale%.l %1, %0" : "=f" (__result) : "dmi" (__n), "0" (__x));  \
+  __asm__ ("fscale%.l %1, %0" : "=f" (__result) : "dmi" (__n), "0" (__x));  \
   return __result;							  \
   return __result;							  \
 }
 }
 
 
@@ -258,7 +258,7 @@ __inline_functions(long double,l)
 __m81_defun (int, __CONCAT(__isnan,s), (float_type __value))	  	  \
 __m81_defun (int, __CONCAT(__isnan,s), (float_type __value))	  	  \
 {									  \
 {									  \
   char __result;							  \
   char __result;							  \
-  __asm("ftst%.x %1\n"							  \
+  __asm__("ftst%.x %1\n"							  \
 	"fsun %0" : "=dm" (__result) : "f" (__value));			  \
 	"fsun %0" : "=dm" (__result) : "f" (__value));			  \
   return __result;							  \
   return __result;							  \
 }
 }
@@ -280,7 +280,7 @@ __m81_defun (int, __CONCAT(__signbit,s), (float_type __value))	  	  \
   /* There is no branch-condition for the sign bit, so we must extract	  \
   /* There is no branch-condition for the sign bit, so we must extract	  \
      and examine the condition codes manually.  */			  \
      and examine the condition codes manually.  */			  \
   unsigned long int __fpsr;						  \
   unsigned long int __fpsr;						  \
-  __asm ("ftst%.x %1\n"							  \
+  __asm__ ("ftst%.x %1\n"							  \
 	 "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value));	  \
 	 "fmove%.l %/fpsr, %0" : "=dm" (__fpsr) : "f" (__value));	  \
   return (__fpsr >> 27) & 1;						  \
   return (__fpsr >> 27) & 1;						  \
 }									  \
 }									  \
@@ -308,7 +308,7 @@ __m81_defun (float_type, __CONCAT(__nearbyint,s), (float_type __x))	  \
 __m81_defun (long int, __CONCAT(__lrint,s), (float_type __x))		  \
 __m81_defun (long int, __CONCAT(__lrint,s), (float_type __x))		  \
 {									  \
 {									  \
   long int __result;							  \
   long int __result;							  \
-  __asm ("fmove%.l %1, %0" : "=dm" (__result) : "f" (__x));		  \
+  __asm__ ("fmove%.l %1, %0" : "=dm" (__result) : "f" (__x));		  \
   return __result;							  \
   return __result;							  \
 }									  \
 }									  \
 									  \
 									  \
@@ -333,7 +333,7 @@ __m81_inline void							\
 __m81_u(__CONCAT(__sincos,s))(float_type __x, float_type *__sinx,	\
 __m81_u(__CONCAT(__sincos,s))(float_type __x, float_type *__sinx,	\
 			      float_type *__cosx)			\
 			      float_type *__cosx)			\
 {									\
 {									\
-  __asm ("fsincos%.x %2,%1:%0"						\
+  __asm__ ("fsincos%.x %2,%1:%0"						\
 	 : "=f" (*__sinx), "=f" (*__cosx) : "f" (__x));			\
 	 : "=f" (*__sinx), "=f" (*__cosx) : "f" (__x));			\
 }
 }
 
 

+ 1 - 1
libc/sysdeps/linux/m68k/brk.c

@@ -18,7 +18,7 @@ int brk (void *addr)
 {
 {
     void *newbrk;
     void *newbrk;
 
 
-	__asm__ volatile ("movel %2,%/d1\n\t"
+	__asm__ __volatile__ ("movel %2,%/d1\n\t"
 			  "moveq %1,%/d0\n\t"
 			  "moveq %1,%/d0\n\t"
 			  "trap  #0\n\t"
 			  "trap  #0\n\t"
 			  "movel %/d0,%0"
 			  "movel %/d0,%0"

+ 1 - 1
libc/sysdeps/linux/m68k/fpu_control.h

@@ -93,7 +93,7 @@ typedef unsigned int fpu_control_t __attribute__ ((__mode__ (__SI__)));
 
 
 /* Macros for accessing the hardware control word.  */
 /* Macros for accessing the hardware control word.  */
 #define _FPU_GETCW(cw) __asm__ ("fmove%.l %!, %0" : "=dm" (cw))
 #define _FPU_GETCW(cw) __asm__ ("fmove%.l %!, %0" : "=dm" (cw))
-#define _FPU_SETCW(cw) __asm__ volatile ("fmove%.l %0, %!" : : "dm" (cw))
+#define _FPU_SETCW(cw) __asm__ __volatile__ ("fmove%.l %0, %!" : : "dm" (cw))
 
 
 #if 0
 #if 0
 /* Default control word set at startup.  */
 /* Default control word set at startup.  */

+ 6 - 6
libc/sysdeps/linux/microblaze/clone.c

@@ -19,19 +19,19 @@
 int
 int
 clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg)
 clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg)
 {
 {
-  register unsigned long rval asm (SYSCALL_RET) = -EINVAL;
+  register unsigned long rval __asm__ (SYSCALL_RET) = -EINVAL;
 
 
   if (fn && child_stack)
   if (fn && child_stack)
     {
     {
-      register unsigned long syscall asm (SYSCALL_NUM);
-      register unsigned long arg0 asm (SYSCALL_ARG0);
-      register unsigned long arg1 asm (SYSCALL_ARG1);
+      register unsigned long syscall __asm__ (SYSCALL_NUM);
+      register unsigned long arg0 __asm__ (SYSCALL_ARG0);
+      register unsigned long arg1 __asm__ (SYSCALL_ARG1);
 
 
       /* Clone this thread.  */
       /* Clone this thread.  */
       arg0 = flags;
       arg0 = flags;
       arg1 = (unsigned long)child_stack;
       arg1 = (unsigned long)child_stack;
       syscall = __NR_clone;
       syscall = __NR_clone;
-      asm volatile ("bralid r17, trap;nop;" 
+      __asm__ __volatile__ ("bralid r17, trap;nop;" 
 		    : "=r" (rval), "=r" (syscall)
 		    : "=r" (rval), "=r" (syscall)
 		    : "1" (syscall), "r" (arg0), "r" (arg1)
 		    : "1" (syscall), "r" (arg0), "r" (arg1)
 		    : SYSCALL_CLOBBERS);
 		    : SYSCALL_CLOBBERS);
@@ -41,7 +41,7 @@ clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg)
 	{
 	{
 	  arg0 = (*fn) (arg);
 	  arg0 = (*fn) (arg);
 	  syscall = __NR_exit;
 	  syscall = __NR_exit;
-	  asm volatile ("bralid r17, trap;nop;" 
+	  __asm__ __volatile__ ("bralid r17, trap;nop;" 
 			: "=r" (rval), "=r" (syscall)
 			: "=r" (rval), "=r" (syscall)
 			: "1" (syscall), "r" (arg0)
 			: "1" (syscall), "r" (arg0)
 			: SYSCALL_CLOBBERS);
 			: SYSCALL_CLOBBERS);

+ 9 - 9
libc/sysdeps/linux/microblaze/syscall.c

@@ -26,18 +26,18 @@ syscall (long num, arg_t a1, arg_t a2, arg_t a3, arg_t a4, arg_t a5, arg_t a6)
      off the stack even for (the majority of) system calls with fewer
      off the stack even for (the majority of) system calls with fewer
      arguments; hopefully this won't cause any problems.  A1-A4 are in
      arguments; hopefully this won't cause any problems.  A1-A4 are in
      registers, so they're OK.  */
      registers, so they're OK.  */
-  register arg_t a asm (SYSCALL_ARG0) = a1;
-  register arg_t b asm (SYSCALL_ARG1) = a2;
-  register arg_t c asm (SYSCALL_ARG2) = a3;
-  register arg_t d asm (SYSCALL_ARG3) = a4;
-  register arg_t e asm (SYSCALL_ARG4) = a5;
-  register arg_t f asm (SYSCALL_ARG5) = a6;
-  register unsigned long syscall asm (SYSCALL_NUM) = num;
-  register unsigned long ret asm (SYSCALL_RET);
+  register arg_t a __asm__ (SYSCALL_ARG0) = a1;
+  register arg_t b __asm__ (SYSCALL_ARG1) = a2;
+  register arg_t c __asm__ (SYSCALL_ARG2) = a3;
+  register arg_t d __asm__ (SYSCALL_ARG3) = a4;
+  register arg_t e __asm__ (SYSCALL_ARG4) = a5;
+  register arg_t f __asm__ (SYSCALL_ARG5) = a6;
+  register unsigned long syscall __asm__ (SYSCALL_NUM) = num;
+  register unsigned long ret __asm__ (SYSCALL_RET);
 	unsigned long ret_sav;
 	unsigned long ret_sav;
 
 
   *((unsigned long *)0xFFFF4004) = (unsigned int)('+');
   *((unsigned long *)0xFFFF4004) = (unsigned int)('+');
-  asm ("brlid r17, 08x; nop;" 
+  __asm__ ("brlid r17, 08x; nop;" 
        : "=r" (ret)
        : "=r" (ret)
        : "r" (syscall), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f)
        : "r" (syscall), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f)
        : SYSCALL_CLOBBERS);
        : SYSCALL_CLOBBERS);

+ 1 - 1
libc/sysdeps/linux/mips/brk.c

@@ -31,7 +31,7 @@ int brk (void *addr)
   {
   {
     register long int res __asm__ ("$2");
     register long int res __asm__ ("$2");
 
 
-    asm ("move\t$4,%2\n\t"
+    __asm__ ("move\t$4,%2\n\t"
 	 "li\t%0,%1\n\t"
 	 "li\t%0,%1\n\t"
 	 "syscall"		/* Perform the system call.  */
 	 "syscall"		/* Perform the system call.  */
 	 : "=r" (res)
 	 : "=r" (res)

+ 35 - 35
libc/sysdeps/linux/mips/setjmp_aux.c

@@ -39,29 +39,29 @@ __sigsetjmp_aux (jmp_buf env, int savemask, int sp, int fp)
 #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__
 #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__
   /* Store the floating point callee-saved registers...  */
   /* Store the floating point callee-saved registers...  */
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 #if _MIPS_SIM == _MIPS_SIM_ABI64
-  asm volatile ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0]));
-  asm volatile ("s.d $f25, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1]));
-  asm volatile ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2]));
-  asm volatile ("s.d $f27, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3]));
-  asm volatile ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4]));
-  asm volatile ("s.d $f29, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5]));
-  asm volatile ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[6]));
-  asm volatile ("s.d $f31, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[7]));
+  __asm__ __volatile__ ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0]));
+  __asm__ __volatile__ ("s.d $f25, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1]));
+  __asm__ __volatile__ ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2]));
+  __asm__ __volatile__ ("s.d $f27, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3]));
+  __asm__ __volatile__ ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4]));
+  __asm__ __volatile__ ("s.d $f29, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5]));
+  __asm__ __volatile__ ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[6]));
+  __asm__ __volatile__ ("s.d $f31, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[7]));
 #else /* O32 || N32 */
 #else /* O32 || N32 */
-  asm volatile ("s.d $f20, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0]));
-  asm volatile ("s.d $f22, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1]));
-  asm volatile ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2]));
-  asm volatile ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3]));
-  asm volatile ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4]));
-  asm volatile ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5]));
+  __asm__ __volatile__ ("s.d $f20, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[0]));
+  __asm__ __volatile__ ("s.d $f22, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[1]));
+  __asm__ __volatile__ ("s.d $f24, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[2]));
+  __asm__ __volatile__ ("s.d $f26, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[3]));
+  __asm__ __volatile__ ("s.d $f28, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[4]));
+  __asm__ __volatile__ ("s.d $f30, %0" : : "m" (env[0].__jmpbuf[0].__fpregs[5]));
 #endif /* O32 || N32 */
 #endif /* O32 || N32 */
 #endif
 #endif
 
 
   /* .. and the PC;  */
   /* .. and the PC;  */
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 #if _MIPS_SIM == _MIPS_SIM_ABI64
-  asm volatile ("sd $31, %0" : : "m" (env[0].__jmpbuf[0].__pc));
+  __asm__ __volatile__ ("sd $31, %0" : : "m" (env[0].__jmpbuf[0].__pc));
 #else
 #else
-  asm volatile ("sw $31, %0" : : "m" (env[0].__jmpbuf[0].__pc));
+  __asm__ __volatile__ ("sw $31, %0" : : "m" (env[0].__jmpbuf[0].__pc));
 #endif
 #endif
 
 
   /* .. and the stack pointer;  */
   /* .. and the stack pointer;  */
@@ -72,35 +72,35 @@ __sigsetjmp_aux (jmp_buf env, int savemask, int sp, int fp)
 
 
   /* .. and the GP; */
   /* .. and the GP; */
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 #if _MIPS_SIM == _MIPS_SIM_ABI64
-  asm volatile ("sd $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp));
+  __asm__ __volatile__ ("sd $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp));
 #else
 #else
-  asm volatile ("sw $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp));
+  __asm__ __volatile__ ("sw $gp, %0" : : "m" (env[0].__jmpbuf[0].__gp));
 #endif
 #endif
 
 
   /* .. and the callee-saved registers; */
   /* .. and the callee-saved registers; */
 #if (_MIPS_SIM == _MIPS_SIM_ABI32)
 #if (_MIPS_SIM == _MIPS_SIM_ABI32)
-  asm volatile ("sw $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0]));
-  asm volatile ("sw $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1]));
-  asm volatile ("sw $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2]));
-  asm volatile ("sw $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3]));
-  asm volatile ("sw $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4]));
-  asm volatile ("sw $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5]));
-  asm volatile ("sw $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6]));
-  asm volatile ("sw $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7]));
+  __asm__ __volatile__ ("sw $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0]));
+  __asm__ __volatile__ ("sw $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1]));
+  __asm__ __volatile__ ("sw $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2]));
+  __asm__ __volatile__ ("sw $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3]));
+  __asm__ __volatile__ ("sw $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4]));
+  __asm__ __volatile__ ("sw $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5]));
+  __asm__ __volatile__ ("sw $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6]));
+  __asm__ __volatile__ ("sw $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7]));
 #else	/* N32 || N64 */
 #else	/* N32 || N64 */
-  asm volatile ("sd $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0]));
-  asm volatile ("sd $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1]));
-  asm volatile ("sd $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2]));
-  asm volatile ("sd $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3]));
-  asm volatile ("sd $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4]));
-  asm volatile ("sd $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5]));
-  asm volatile ("sd $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6]));
-  asm volatile ("sd $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7]));
+  __asm__ __volatile__ ("sd $16, %0" : : "m" (env[0].__jmpbuf[0].__regs[0]));
+  __asm__ __volatile__ ("sd $17, %0" : : "m" (env[0].__jmpbuf[0].__regs[1]));
+  __asm__ __volatile__ ("sd $18, %0" : : "m" (env[0].__jmpbuf[0].__regs[2]));
+  __asm__ __volatile__ ("sd $19, %0" : : "m" (env[0].__jmpbuf[0].__regs[3]));
+  __asm__ __volatile__ ("sd $20, %0" : : "m" (env[0].__jmpbuf[0].__regs[4]));
+  __asm__ __volatile__ ("sd $21, %0" : : "m" (env[0].__jmpbuf[0].__regs[5]));
+  __asm__ __volatile__ ("sd $22, %0" : : "m" (env[0].__jmpbuf[0].__regs[6]));
+  __asm__ __volatile__ ("sd $23, %0" : : "m" (env[0].__jmpbuf[0].__regs[7]));
 #endif	/* N32 || N64 */
 #endif	/* N32 || N64 */
 
 
 #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__
 #if defined __UCLIBC_HAS_FLOATS__ && ! defined __UCLIBC_HAS_SOFT_FLOAT__
   /* .. and finally get and reconstruct the floating point csr.  */
   /* .. and finally get and reconstruct the floating point csr.  */
-  asm ("cfc1 %0, $31" : "=r" (env[0].__jmpbuf[0].__fpc_csr));
+  __asm__ ("cfc1 %0, $31" : "=r" (env[0].__jmpbuf[0].__fpc_csr));
 #endif
 #endif
 
 
   /* Save the signal mask if requested.  */
   /* Save the signal mask if requested.  */
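
This block is representative of the whole commit: plain `asm'/`volatile' are GNU extensions that GCC disables under -std=c99 or -ansi, while the reserved `__asm__'/`__volatile__' spellings are accepted in every mode, so the respelling changes no object code. A minimal, architecture-neutral illustration of the difference (my sketch, not part of the patch):

    /* compile with: gcc -std=c99 -pedantic-errors -c barrier.c */
    static inline void compiler_barrier (void)
    {
        /* asm volatile ("" : : : "memory");       <- not accepted in strict ISO mode */
        __asm__ __volatile__ ("" : : : "memory");  /* accepted everywhere */
    }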

+ 3 - 3
libc/sysdeps/linux/mips/sigaction.c

@@ -36,10 +36,10 @@ libc_hidden_proto(memcpy)
 #if _MIPS_SIM != _ABIO32
 #if _MIPS_SIM != _ABIO32
 
 
 # ifdef __NR_rt_sigreturn
 # ifdef __NR_rt_sigreturn
-static void restore_rt (void) asm ("__restore_rt");
+static void restore_rt (void) __asm__ ("__restore_rt");
 # endif
 # endif
 # ifdef __NR_sigreturn
 # ifdef __NR_sigreturn
-static void restore (void) asm ("__restore");
+static void restore (void) __asm__ ("__restore");
 # endif
 # endif
 #endif
 #endif
 
 
@@ -81,7 +81,7 @@ int __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oa
 
 
 
 
 #else
 #else
-extern void restore (void) asm ("__restore") attribute_hidden;
+extern void restore (void) __asm__ ("__restore") attribute_hidden;
 
 
 /* If ACT is not NULL, change the action for SIG to *ACT.
 /* If ACT is not NULL, change the action for SIG to *ACT.
    If OACT is not NULL, put the old action for SIG in *OACT.  */
    If OACT is not NULL, put the old action for SIG in *OACT.  */
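
The `__asm__ ("__restore_rt")' suffix used above is an asm label: it binds a C declaration to a fixed assembler symbol. A stand-alone sketch of the construct (hypothetical names, not taken from sigaction.c):

    /* The declaration pins the emitted symbol name; the definition that
       follows is therefore assembled as `real_symbol_name'.  */
    int helper (void) __asm__ ("real_symbol_name");
    int helper (void) { return 42; }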

+ 3 - 3
libc/sysdeps/linux/nios/brk.c

@@ -28,10 +28,10 @@ libc_hidden_proto(brk)
 int brk (void *addr)
 int brk (void *addr)
 {
 {
     void *newbrk;
     void *newbrk;
-    register int g1 asm("%g1") = __NR_brk;
-    register void *o0 asm("%o0") = addr;
+    register int g1 __asm__("%g1") = __NR_brk;
+    register void *o0 __asm__("%o0") = addr;
 
 
-    asm volatile ("trap 63\n\t" : "=r"(newbrk) : "0"(o0), "r"(g1));
+    __asm__ __volatile__ ("trap 63\n\t" : "=r"(newbrk) : "0"(o0), "r"(g1));
 
 
     __curbrk = newbrk;
     __curbrk = newbrk;
 
 

+ 4 - 4
libc/sysdeps/linux/nios2/brk.c

@@ -28,11 +28,11 @@ libc_hidden_proto(brk)
 int brk (void *addr)
 int brk (void *addr)
 {
 {
     void *newbrk;
     void *newbrk;
-    register int r2 asm("r2") = TRAP_ID_SYSCALL;
-    register int r3 asm("r3") = __NR_brk;
-    register void *r4 asm("r4") = addr;
+    register int r2 __asm__("r2") = TRAP_ID_SYSCALL;
+    register int r3 __asm__("r3") = __NR_brk;
+    register void *r4 __asm__("r4") = addr;
 
 
-    asm volatile ("trap\n\t" : "=r"(newbrk) : "0"(r2), "r"(r3), "r"(r4));
+    __asm__ __volatile__ ("trap\n\t" : "=r"(newbrk) : "0"(r2), "r"(r3), "r"(r4));
 
 
     __curbrk = newbrk;
     __curbrk = newbrk;
 
 

+ 6 - 6
libc/sysdeps/linux/nios2/clone.c

@@ -19,19 +19,19 @@
 
 
 int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg, ...)
 int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg, ...)
 {
 {
-  register unsigned long rval asm ("r2") = -EINVAL;
+  register unsigned long rval __asm__ ("r2") = -EINVAL;
 
 
   if (fn && child_stack) {
   if (fn && child_stack) {
-      register unsigned long syscall asm ("r3");
-      register unsigned long arg0 asm ("r4");
-      register unsigned long arg1 asm ("r5");
+      register unsigned long syscall __asm__ ("r3");
+      register unsigned long arg0 __asm__ ("r4");
+      register unsigned long arg1 __asm__ ("r5");
 
 
       /* Clone this thread.  */
       /* Clone this thread.  */
       rval = TRAP_ID_SYSCALL;
       rval = TRAP_ID_SYSCALL;
       syscall = __NR_clone;
       syscall = __NR_clone;
       arg0 = flags;
       arg0 = flags;
       arg1 = (unsigned long)child_stack;
       arg1 = (unsigned long)child_stack;
-      asm volatile ("trap "
+      __asm__ __volatile__ ("trap "
          : "=r" (rval), "=r" (syscall)
          : "=r" (rval), "=r" (syscall)
          : "0" (rval),"1" (syscall), "r" (arg0), "r" (arg1)
          : "0" (rval),"1" (syscall), "r" (arg0), "r" (arg1)
          );
          );
@@ -40,7 +40,7 @@ int clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg, ...)
          /* In child thread, call fn and exit.  */
          /* In child thread, call fn and exit.  */
          arg0 = (*fn) (arg);
          arg0 = (*fn) (arg);
          syscall = __NR_exit;
          syscall = __NR_exit;
-         asm volatile ("trap "
+         __asm__ __volatile__ ("trap "
           : "=r" (rval), "=r" (syscall)
           : "=r" (rval), "=r" (syscall)
           : "1" (syscall), "r" (arg0));
           : "1" (syscall), "r" (arg0));
       }
       }
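
For context, the caller-visible contract this file implements is the ordinary clone() API. A short usage sketch (assumed Linux; child_fn and the 64 KiB stack size are placeholders, and the top of the buffer is passed because the stack grows downward):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdlib.h>

    static int child_fn (void *arg) { return 0; }

    static int start_child (void)
    {
        const size_t size = 64 * 1024;
        char *stack = malloc (size);
        if (!stack)
            return -1;
        return clone (child_fn, stack + size, SIGCHLD, NULL);  /* child pid, or -1 */
    }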

+ 9 - 9
libc/sysdeps/linux/nios2/syscall.c

@@ -26,16 +26,16 @@
 
 
 long syscall(long sysnum, long a, long b, long c, long d, long e, long f)
 long syscall(long sysnum, long a, long b, long c, long d, long e, long f)
 {
 {
-    register long _r2 asm("r2")=(long)TRAP_ID_SYSCALL;
-    register long _r3 asm("r3")=(long)sysnum;
+    register long _r2 __asm__("r2")=(long)TRAP_ID_SYSCALL;
+    register long _r3 __asm__("r3")=(long)sysnum;
    
    
-    register long _r4 asm("r4")=(long)(a);
-    register long _r5 asm("r5")=(long)(b);
-    register long _r6 asm("r6")=(long)(c);
-    register long _r7 asm("r7")=(long)(d);
-    register long _r8 asm("r8")=(long)(e);
-    register long _r9 asm("r9")=(long)(f);
-    asm volatile(
+    register long _r4 __asm__("r4")=(long)(a);
+    register long _r5 __asm__("r5")=(long)(b);
+    register long _r6 __asm__("r6")=(long)(c);
+    register long _r7 __asm__("r7")=(long)(d);
+    register long _r8 __asm__("r8")=(long)(e);
+    register long _r9 __asm__("r9")=(long)(f);
+    __asm__ __volatile__(
 	    "trap "
 	    "trap "
 	    : "=r"(_r2), "=r"(_r3)
 	    : "=r"(_r2), "=r"(_r3)
 	    : "0"(_r2), "1"(_r3),
 	    : "0"(_r2), "1"(_r3),

+ 4 - 4
libc/sysdeps/linux/powerpc/bits/atomic.h

@@ -235,7 +235,7 @@
 /*
 /*
  * All powerpc64 processors support the new "light weight"  sync (lwsync).
  * All powerpc64 processors support the new "light weight"  sync (lwsync).
  */
  */
-# define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
+# define atomic_read_barrier()	__asm__ ("lwsync" ::: "memory")
 /*
 /*
  * "light weight" sync can also be used for the release barrier.
  * "light weight" sync can also be used for the release barrier.
  */
  */
@@ -340,7 +340,7 @@
  * sync (lwsync).  So the only safe option is to use normal sync
  * sync (lwsync).  So the only safe option is to use normal sync
  * for all powerpc32 applications.
  * for all powerpc32 applications.
  */
  */
-# define atomic_read_barrier()	__asm ("sync" ::: "memory")
+# define atomic_read_barrier()	__asm__ ("sync" ::: "memory")
 
 
 #endif
 #endif
 
 
@@ -387,8 +387,8 @@ typedef uintmax_t uatomic_max_t;
 # endif
 # endif
 #endif
 #endif
 
 
-#define atomic_full_barrier()	__asm ("sync" ::: "memory")
-#define atomic_write_barrier()	__asm ("eieio" ::: "memory")
+#define atomic_full_barrier()	__asm__ ("sync" ::: "memory")
+#define atomic_write_barrier()	__asm__ ("eieio" ::: "memory")
 
 
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)	      \
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)	      \
   ({									      \
   ({									      \
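
Where plain C is an option, the acquire/release intent of these sync/lwsync/eieio strings can be written with C11 fences instead; a rough portable sketch (for comparison only, not how uClibc defines the macros):

    #include <stdatomic.h>

    static inline void read_barrier (void)  { atomic_thread_fence (memory_order_acquire); }
    static inline void write_barrier (void) { atomic_thread_fence (memory_order_release); }
    static inline void full_barrier (void)  { atomic_thread_fence (memory_order_seq_cst); }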

+ 2 - 2
libc/sysdeps/linux/powerpc/fpu_control.h

@@ -57,11 +57,11 @@ typedef unsigned int fpu_control_t __attribute__ ((__mode__ (__SI__)));
 /* Macros for accessing the hardware control word.  */
 /* Macros for accessing the hardware control word.  */
 #define _FPU_GETCW(__cw) ({ \
 #define _FPU_GETCW(__cw) ({ \
   unsigned int env; \
   unsigned int env; \
-  asm volatile ("mfspefscr %0" : "=r" (env)); \
+  __asm__ __volatile__ ("mfspefscr %0" : "=r" (env)); \
   (__cw) = env; })
   (__cw) = env; })
 #define _FPU_SETCW(__cw) ({ \
 #define _FPU_SETCW(__cw) ({ \
   unsigned int env = __cw; \
   unsigned int env = __cw; \
-  asm volatile ("mtspefscr %0" : : "r" (env)); })
+  __asm__ __volatile__ ("mtspefscr %0" : : "r" (env)); })
 #else
 #else
 #define _FPU_RESERVED 0xffffff00 /* These bits are reserved and are not changed. */
 #define _FPU_RESERVED 0xffffff00 /* These bits are reserved and are not changed. */
 /* IEEE:  same as above, but (some) exceptions;
 /* IEEE:  same as above, but (some) exceptions;
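
Callers use these macros as a read-modify-write pair on the control word; a small sketch of that pattern (clear_mask/set_mask are placeholders, the real bit values are FPU-specific):

    #include <fpu_control.h>

    static void tweak_control_word (fpu_control_t clear_mask, fpu_control_t set_mask)
    {
        fpu_control_t cw;
        _FPU_GETCW (cw);      /* expands to the inline asm shown above */
        cw &= ~clear_mask;
        cw |= set_mask;
        _FPU_SETCW (cw);
    }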

+ 4 - 4
libc/sysdeps/linux/sparc/bits/mathinline.h

@@ -198,7 +198,7 @@ __MATH_INLINE double
 __NTH (sqrt (double __x))
 __NTH (sqrt (double __x))
 {
 {
   register double __r;
   register double __r;
-  __asm ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x));
+  __asm__ ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x));
   return __r;
   return __r;
 }
 }
 
 
@@ -206,7 +206,7 @@ __MATH_INLINE float
 __NTH (sqrtf (float __x))
 __NTH (sqrtf (float __x))
 {
 {
   register float __r;
   register float __r;
-  __asm ("fsqrts %1,%0" : "=f" (__r) : "f" (__x));
+  __asm__ ("fsqrts %1,%0" : "=f" (__r) : "f" (__x));
   return __r;
   return __r;
 }
 }
 
 
@@ -236,7 +236,7 @@ __MATH_INLINE double
 __ieee754_sqrt (double __x)
 __ieee754_sqrt (double __x)
 {
 {
   register double __r;
   register double __r;
-  __asm ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x));
+  __asm__ ("fsqrtd %1,%0" : "=f" (__r) : "f" (__x));
   return __r;
   return __r;
 }
 }
 
 
@@ -244,7 +244,7 @@ __MATH_INLINE float
 __ieee754_sqrtf (float __x)
 __ieee754_sqrtf (float __x)
 {
 {
   register float __r;
   register float __r;
-  __asm ("fsqrts %1,%0" : "=f" (__r) : "f" (__x));
+  __asm__ ("fsqrts %1,%0" : "=f" (__r) : "f" (__x));
   return __r;
   return __r;
 }
 }
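
The shape of these wrappers (one instruction, one register-class output, one input) carries over to other ISAs; an analogous x86-64 SSE2 sketch of the same idea (my example, not uClibc code):

    static inline double sqrt_sse2 (double x)
    {
        double r;
        __asm__ ("sqrtsd %1, %0" : "=x" (r) : "x" (x));  /* "x" = any SSE register */
        return r;
    }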
 
 

+ 1 - 1
libc/sysdeps/linux/sparc/brk.c

@@ -33,7 +33,7 @@ int brk (void *addr)
     {
     {
 	register void *o0 __asm__("%o0") = addr;
 	register void *o0 __asm__("%o0") = addr;
 	register int g1 __asm__("%g1") = 17 ;
 	register int g1 __asm__("%g1") = 17 ;
-	__asm ("t 0x10" : "=r"(o0) : "r"(g1), "0"(o0) : "cc");
+	__asm__ ("t 0x10" : "=r"(o0) : "r"(g1), "0"(o0) : "cc");
 	newbrk = o0;
 	newbrk = o0;
     }
     }
 
 

+ 6 - 6
libc/sysdeps/linux/v850/clone.c

@@ -17,19 +17,19 @@
 int
 int
 clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg)
 clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg)
 {
 {
-  register unsigned long rval asm (SYSCALL_RET) = -EINVAL;
+  register unsigned long rval __asm__ (SYSCALL_RET) = -EINVAL;
 
 
   if (fn && child_stack)
   if (fn && child_stack)
     {
     {
-      register unsigned long syscall asm (SYSCALL_NUM);
-      register unsigned long arg0 asm (SYSCALL_ARG0);
-      register unsigned long arg1 asm (SYSCALL_ARG1);
+      register unsigned long syscall __asm__ (SYSCALL_NUM);
+      register unsigned long arg0 __asm__ (SYSCALL_ARG0);
+      register unsigned long arg1 __asm__ (SYSCALL_ARG1);
 
 
       /* Clone this thread.  */
       /* Clone this thread.  */
       arg0 = flags;
       arg0 = flags;
       arg1 = (unsigned long)child_stack;
       arg1 = (unsigned long)child_stack;
       syscall = __NR_clone;
       syscall = __NR_clone;
-      asm volatile ("trap " SYSCALL_SHORT_TRAP
+      __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP
 		    : "=r" (rval), "=r" (syscall)
 		    : "=r" (rval), "=r" (syscall)
 		    : "1" (syscall), "r" (arg0), "r" (arg1)
 		    : "1" (syscall), "r" (arg0), "r" (arg1)
 		    : SYSCALL_SHORT_CLOBBERS);
 		    : SYSCALL_SHORT_CLOBBERS);
@@ -39,7 +39,7 @@ clone (int (*fn)(void *arg), void *child_stack, int flags, void *arg)
 	{
 	{
 	  arg0 = (*fn) (arg);
 	  arg0 = (*fn) (arg);
 	  syscall = __NR_exit;
 	  syscall = __NR_exit;
-	  asm volatile ("trap " SYSCALL_SHORT_TRAP
+	  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP
 			: "=r" (rval), "=r" (syscall)
 			: "=r" (rval), "=r" (syscall)
 			: "1" (syscall), "r" (arg0)
 			: "1" (syscall), "r" (arg0)
 			: SYSCALL_SHORT_CLOBBERS);
 			: SYSCALL_SHORT_CLOBBERS);

+ 9 - 9
libc/sysdeps/linux/v850/syscall.c

@@ -25,16 +25,16 @@ syscall (long num, arg_t a1, arg_t a2, arg_t a3, arg_t a4, arg_t a5, arg_t a6)
      off the stack even for (the majority of) system calls with fewer
      off the stack even for (the majority of) system calls with fewer
      arguments; hopefully this won't cause any problems.  A1-A4 are in
      arguments; hopefully this won't cause any problems.  A1-A4 are in
      registers, so they're OK.  */
      registers, so they're OK.  */
-  register arg_t a asm (SYSCALL_ARG0) = a1;
-  register arg_t b asm (SYSCALL_ARG1) = a2;
-  register arg_t c asm (SYSCALL_ARG2) = a3;
-  register arg_t d asm (SYSCALL_ARG3) = a4;
-  register arg_t e asm (SYSCALL_ARG4) = a5;
-  register arg_t f asm (SYSCALL_ARG5) = a6;
-  register unsigned long syscall asm (SYSCALL_NUM) = num;
-  register unsigned long ret asm (SYSCALL_RET);
+  register arg_t a __asm__ (SYSCALL_ARG0) = a1;
+  register arg_t b __asm__ (SYSCALL_ARG1) = a2;
+  register arg_t c __asm__ (SYSCALL_ARG2) = a3;
+  register arg_t d __asm__ (SYSCALL_ARG3) = a4;
+  register arg_t e __asm__ (SYSCALL_ARG4) = a5;
+  register arg_t f __asm__ (SYSCALL_ARG5) = a6;
+  register unsigned long syscall __asm__ (SYSCALL_NUM) = num;
+  register unsigned long ret __asm__ (SYSCALL_RET);
 
 
-  asm ("trap " SYSCALL_LONG_TRAP
+  __asm__ ("trap " SYSCALL_LONG_TRAP
        : "=r" (ret)
        : "=r" (ret)
        : "r" (syscall), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f)
        : "r" (syscall), "r" (a), "r" (b), "r" (c), "r" (d), "r" (e), "r" (f)
        : SYSCALL_CLOBBERS);
        : SYSCALL_CLOBBERS);
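
The function being patched here is the generic syscall(2) entry point; from the caller's side it is used like this (illustrative; SYS_gettid is Linux-specific, and unused trailing arguments are simply ignored):

    #include <sys/syscall.h>
    #include <unistd.h>

    static long current_tid (void)
    {
        return syscall (SYS_gettid);
    }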

+ 1 - 1
libc/sysdeps/linux/x86_64/bits/atomic.h

@@ -321,4 +321,4 @@ typedef uintmax_t uatomic_max_t;
      __result; })
      __result; })
 
 
 
 
-#define atomic_delay() asm ("rep; nop")
+#define atomic_delay() __asm__ ("rep; nop")
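
`rep; nop' encodes the x86 PAUSE hint, so atomic_delay() belongs in the polling half of a spin loop, roughly as in this sketch (hypothetical lock code, not from uClibc):

    static void spin_acquire (volatile int *lock)
    {
        while (__sync_lock_test_and_set (lock, 1))  /* try to take the lock */
            while (*lock)
                __asm__ ("rep; nop");               /* back off while it is held */
    }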

+ 7 - 7
libc/sysdeps/linux/x86_64/bits/syscalls.h

@@ -141,7 +141,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
     unsigned long resultvar;						      \
     unsigned long resultvar;						      \
     LOAD_ARGS_##nr (args)						      \
     LOAD_ARGS_##nr (args)						      \
     LOAD_REGS_##nr							      \
     LOAD_REGS_##nr							      \
-    asm volatile (							      \
+    __asm__ __volatile__ (							      \
     "syscall\n\t"							      \
     "syscall\n\t"							      \
     : "=a" (resultvar)							      \
     : "=a" (resultvar)							      \
     : "0" (name) ASM_ARGS_##nr : "memory", "cc", "r11", "cx");		      \
     : "0" (name) ASM_ARGS_##nr : "memory", "cc", "r11", "cx");		      \
@@ -165,7 +165,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
   long int __arg1 = (long) (a1);			\
   long int __arg1 = (long) (a1);			\
   LOAD_ARGS_0 ()
   LOAD_ARGS_0 ()
 #define LOAD_REGS_1					\
 #define LOAD_REGS_1					\
-  register long int _a1 asm ("rdi") = __arg1;		\
+  register long int _a1 __asm__ ("rdi") = __arg1;		\
   LOAD_REGS_0
   LOAD_REGS_0
 #define ASM_ARGS_1	ASM_ARGS_0, "r" (_a1)
 #define ASM_ARGS_1	ASM_ARGS_0, "r" (_a1)
 
 
@@ -173,7 +173,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
   long int __arg2 = (long) (a2);			\
   long int __arg2 = (long) (a2);			\
   LOAD_ARGS_1 (a1)
   LOAD_ARGS_1 (a1)
 #define LOAD_REGS_2					\
 #define LOAD_REGS_2					\
-  register long int _a2 asm ("rsi") = __arg2;		\
+  register long int _a2 __asm__ ("rsi") = __arg2;		\
   LOAD_REGS_1
   LOAD_REGS_1
 #define ASM_ARGS_2	ASM_ARGS_1, "r" (_a2)
 #define ASM_ARGS_2	ASM_ARGS_1, "r" (_a2)
 
 
@@ -181,7 +181,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
   long int __arg3 = (long) (a3);			\
   long int __arg3 = (long) (a3);			\
   LOAD_ARGS_2 (a1, a2)
   LOAD_ARGS_2 (a1, a2)
 #define LOAD_REGS_3					\
 #define LOAD_REGS_3					\
-  register long int _a3 asm ("rdx") = __arg3;		\
+  register long int _a3 __asm__ ("rdx") = __arg3;		\
   LOAD_REGS_2
   LOAD_REGS_2
 #define ASM_ARGS_3	ASM_ARGS_2, "r" (_a3)
 #define ASM_ARGS_3	ASM_ARGS_2, "r" (_a3)
 
 
@@ -189,7 +189,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
   long int __arg4 = (long) (a4);			\
   long int __arg4 = (long) (a4);			\
   LOAD_ARGS_3 (a1, a2, a3)
   LOAD_ARGS_3 (a1, a2, a3)
 #define LOAD_REGS_4					\
 #define LOAD_REGS_4					\
-  register long int _a4 asm ("r10") = __arg4;		\
+  register long int _a4 __asm__ ("r10") = __arg4;		\
   LOAD_REGS_3
   LOAD_REGS_3
 #define ASM_ARGS_4	ASM_ARGS_3, "r" (_a4)
 #define ASM_ARGS_4	ASM_ARGS_3, "r" (_a4)
 
 
@@ -197,7 +197,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
   long int __arg5 = (long) (a5);			\
   long int __arg5 = (long) (a5);			\
   LOAD_ARGS_4 (a1, a2, a3, a4)
   LOAD_ARGS_4 (a1, a2, a3, a4)
 #define LOAD_REGS_5					\
 #define LOAD_REGS_5					\
-  register long int _a5 asm ("r8") = __arg5;		\
+  register long int _a5 __asm__ ("r8") = __arg5;		\
   LOAD_REGS_4
   LOAD_REGS_4
 #define ASM_ARGS_5	ASM_ARGS_4, "r" (_a5)
 #define ASM_ARGS_5	ASM_ARGS_4, "r" (_a5)
 
 
@@ -205,7 +205,7 @@ return (type) (INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6)); \
   long int __arg6 = (long) (a6);			\
   long int __arg6 = (long) (a6);			\
   LOAD_ARGS_5 (a1, a2, a3, a4, a5)
   LOAD_ARGS_5 (a1, a2, a3, a4, a5)
 #define LOAD_REGS_6					\
 #define LOAD_REGS_6					\
-  register long int _a6 asm ("r9") = __arg6;		\
+  register long int _a6 __asm__ ("r9") = __arg6;		\
   LOAD_REGS_5
   LOAD_REGS_5
 #define ASM_ARGS_6	ASM_ARGS_5, "r" (_a6)
 #define ASM_ARGS_6	ASM_ARGS_5, "r" (_a6)
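
Expanded by hand, the macros above pin each argument to the register the x86-64 kernel ABI expects and let `syscall' clobber rcx/r11; a one-argument sketch of the generated shape (assumed Linux, illustrative only):

    #include <sys/syscall.h>

    static long raw_close (int fd)
    {
        register long rax __asm__ ("rax") = SYS_close;
        register long rdi __asm__ ("rdi") = fd;
        __asm__ __volatile__ ("syscall"
                              : "+r" (rax)
                              : "r" (rdi)
                              : "rcx", "r11", "memory", "cc");
        return rax;   /* negative values encode -errno */
    }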
 
 

+ 1 - 1
libc/sysdeps/linux/x86_64/brk.c

@@ -29,7 +29,7 @@ int brk (void *addr)
 {
 {
 	void *__unbounded newbrk;
 	void *__unbounded newbrk;
 
 
-	asm ("syscall\n"
+	__asm__ ("syscall\n"
 	     : "=a" (newbrk)
 	     : "=a" (newbrk)
 	     : "0" (__NR_brk), "D" (__ptrvalue (addr))
 	     : "0" (__NR_brk), "D" (__ptrvalue (addr))
 	     : "r11","rcx","memory");
 	     : "r11","rcx","memory");

+ 4 - 4
libc/sysdeps/linux/x86_64/sigaction.c

@@ -38,8 +38,8 @@ extern __typeof(sigaction) __libc_sigaction;
 #ifdef __NR_rt_sigaction
 #ifdef __NR_rt_sigaction
 /* Using the hidden attribute here does not change the code but it
 /* Using the hidden attribute here does not change the code but it
    helps to avoid warnings.  */
    helps to avoid warnings.  */
-extern void restore_rt (void) asm ("__restore_rt") attribute_hidden;
-extern void restore (void) asm ("__restore") attribute_hidden;
+extern void restore_rt (void) __asm__ ("__restore_rt") attribute_hidden;
+extern void restore (void) __asm__ ("__restore") attribute_hidden;
 
 
 libc_hidden_proto(memcpy)
 libc_hidden_proto(memcpy)
 
 
@@ -74,7 +74,7 @@ __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
 }
 }
 #else
 #else
 
 
-extern void restore (void) asm ("__restore") attribute_hidden;
+extern void restore (void) __asm__ ("__restore") attribute_hidden;
 
 
 /* If ACT is not NULL, change the action for SIG to *ACT.
 /* If ACT is not NULL, change the action for SIG to *ACT.
    If OACT is not NULL, put the old action for SIG in *OACT.  */
    If OACT is not NULL, put the old action for SIG in *OACT.  */
@@ -98,7 +98,7 @@ __libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
 		kact.sa_restorer = &restore;
 		kact.sa_restorer = &restore;
 	}
 	}
 
 
-	asm volatile ("syscall\n"
+	__asm__ __volatile__ ("syscall\n"
 	              : "=a" (result)
 	              : "=a" (result)
 	              : "0" (__NR_sigaction), "mr" (sig),
 	              : "0" (__NR_sigaction), "mr" (sig),
 	                "c" (act ? __ptrvalue (&kact) : 0),
 	                "c" (act ? __ptrvalue (&kact) : 0),

+ 2 - 2
libc/sysdeps/linux/xtensa/bits/syscalls.h

@@ -53,7 +53,7 @@
 #include <errno.h>
 #include <errno.h>
 
 
 #define STR(s) #s
 #define STR(s) #s
-#define LD_ARG(n,ar)	register int _a##n asm (STR(a##n)) = (int) (ar)
+#define LD_ARG(n,ar)	register int _a##n __asm__ (STR(a##n)) = (int) (ar)
 
 
 #define LD_ARGS_0()
 #define LD_ARGS_0()
 #define LD_ARGS_1(a0)			LD_ARG(6,a0)
 #define LD_ARGS_1(a0)			LD_ARG(6,a0)
@@ -90,7 +90,7 @@
 #define INTERNAL_SYSCALL_NCS(name, err, nr, args...)			      \
 #define INTERNAL_SYSCALL_NCS(name, err, nr, args...)			      \
   ({ LD_ARG(2, name);							      \
   ({ LD_ARG(2, name);							      \
      LD_ARGS_##nr(args);						      \
      LD_ARGS_##nr(args);						      \
-     asm volatile ("syscall\n" 						      \
+     __asm__ __volatile__ ("syscall\n" 						      \
 	 	   : "=a" (_a2)						      \
 	 	   : "=a" (_a2)						      \
 	 	   : ASM_ARGS_##nr					      \
 	 	   : ASM_ARGS_##nr					      \
 	 	   : "memory");						      \
 	 	   : "memory");						      \

+ 2 - 2
libm/powerpc/classic/s_ceil.c

@@ -75,9 +75,9 @@ double ceil ( double x )
 				return ( x );
 				return ( x );
 			else
 			else
 				{			                // inexact case
 				{			                // inexact case
-				asm ("mffs %0" : "=f" (OldEnvironment.dbl));
+				__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));
 				OldEnvironment.words.lo |= 0x02000000ul;
 				OldEnvironment.words.lo |= 0x02000000ul;
-				asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+				__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 				if ( target )
 				if ( target )
 					return ( 1.0 );
 					return ( 1.0 );
 				else
 				else

+ 2 - 2
libm/powerpc/classic/s_floor.c

@@ -75,9 +75,9 @@ double floor ( double x )
 				return ( x );
 				return ( x );
 			else
 			else
 				{			                // inexact case
 				{			                // inexact case
-				asm ("mffs %0" : "=f" (OldEnvironment.dbl));
+				__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));
 				OldEnvironment.words.lo |= 0x02000000ul;
 				OldEnvironment.words.lo |= 0x02000000ul;
-				asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+				__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 				if ( target )
 				if ( target )
 					return ( 0.0 );
 					return ( 0.0 );
 				else
 				else

+ 21 - 21
libm/powerpc/classic/s_modf.c

@@ -104,12 +104,12 @@ long int rinttol ( double x )
                   return ( ( long ) argument.words.lo );
                   return ( ( long ) argument.words.lo );
                   }
                   }
 
 
-		asm ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
+		__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
 
 
             if ( xHead > 0x41dffffful )
             if ( xHead > 0x41dffffful )
                   {                                    // x is safely out of long range
                   {                                    // x is safely out of long range
                   OldEnvironment.words.lo |= SET_INVALID;
                   OldEnvironment.words.lo |= SET_INVALID;
-			asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+			__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
                   return ( LONG_MAX );
                   return ( LONG_MAX );
                   }
                   }
 
 
@@ -121,7 +121,7 @@ long int rinttol ( double x )
             if ( y > ( double ) LONG_MAX )
             if ( y > ( double ) LONG_MAX )
                   {                                    // out of range of long
                   {                                    // out of range of long
                   OldEnvironment.words.lo |= SET_INVALID;
                   OldEnvironment.words.lo |= SET_INVALID;
-			asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+			__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
                   return ( LONG_MAX );
                   return ( LONG_MAX );
                   }
                   }
             argument.dbl = y + doubleToLong;           // in range
             argument.dbl = y + doubleToLong;           // in range
@@ -138,12 +138,12 @@ long int rinttol ( double x )
             return ( ( long ) argument.words.lo );
             return ( ( long ) argument.words.lo );
             }
             }
 
 
-	asm ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
+	__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
 
 
       if ( xHead > 0x41e00000ul )
       if ( xHead > 0x41e00000ul )
             {                                          // x is safely out of long range
             {                                          // x is safely out of long range
             OldEnvironment.words.lo |= SET_INVALID;
             OldEnvironment.words.lo |= SET_INVALID;
-		asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+		__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
             return ( LONG_MIN );
             return ( LONG_MIN );
             }
             }
 
 
@@ -155,7 +155,7 @@ long int rinttol ( double x )
       if ( y < ( double ) LONG_MIN )
       if ( y < ( double ) LONG_MIN )
             {                                          // out of range of long
             {                                          // out of range of long
             OldEnvironment.words.lo |= SET_INVALID;
             OldEnvironment.words.lo |= SET_INVALID;
-		asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+		__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
             return ( LONG_MIN );
             return ( LONG_MIN );
             }
             }
       argument.dbl = y + doubleToLong;                       // in range
       argument.dbl = y + doubleToLong;                       // in range
@@ -193,9 +193,9 @@ long int roundtol ( double x )
 *     Is x out of long range or NaN?                                           *
 *     Is x out of long range or NaN?                                           *
 *******************************************************************************/
 *******************************************************************************/
 		{
 		{
-		asm ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
+		__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
 		OldEnvironment.words.lo |= SET_INVALID;
 		OldEnvironment.words.lo |= SET_INVALID;
-		asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+		__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 		if ( target )			              	// pin result
 		if ( target )			              	// pin result
 			return ( LONG_MAX );
 			return ( LONG_MAX );
 		else
 		else
@@ -215,11 +215,11 @@ long int roundtol ( double x )
 			y = ( x + doubleToLong ) - doubleToLong; 	// round at binary point
 			y = ( x + doubleToLong ) - doubleToLong; 	// round at binary point
 			if ( y != x )
 			if ( y != x )
 				{		                    	// inexact case
 				{		                    	// inexact case
-				asm ("mffs %0" : "=f" (OldEnvironment.dbl));	// save environment
-				asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( kTZ.dbl )); // truncate rounding
+				__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));	// save environment
+				__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( kTZ.dbl )); // truncate rounding
 				z = x + 0.5;		        	// truncate x + 0.5
 				z = x + 0.5;		        	// truncate x + 0.5
 				argument.dbl = z + doubleToLong;
 				argument.dbl = z + doubleToLong;
-				asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+				__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 				return ( ( long ) argument.words.lo );
 				return ( ( long ) argument.words.lo );
 				}
 				}
 
 
@@ -229,9 +229,9 @@ long int roundtol ( double x )
 /*******************************************************************************
 /*******************************************************************************
 *     Rounded positive x is out of the range of a long.                        *
 *     Rounded positive x is out of the range of a long.                        *
 *******************************************************************************/
 *******************************************************************************/
-		asm ("mffs %0" : "=f" (OldEnvironment.dbl));
+		__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));
 		OldEnvironment.words.lo |= SET_INVALID;
 		OldEnvironment.words.lo |= SET_INVALID;
-		asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+		__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 		return ( LONG_MAX );		              	// return pinned result
 		return ( LONG_MAX );		              	// return pinned result
 		}
 		}
 /*******************************************************************************
 /*******************************************************************************
@@ -245,11 +245,11 @@ long int roundtol ( double x )
 		y = ( x + doubleToLong ) - doubleToLong;	  	// round at binary point
 		y = ( x + doubleToLong ) - doubleToLong;	  	// round at binary point
 		if ( y != x )
 		if ( y != x )
 			{			                    	// inexact case
 			{			                    	// inexact case
-			asm ("mffs %0" : "=f" (OldEnvironment.dbl));	// save environment
-			asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( kUP.dbl )); // round up
+			__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));	// save environment
+			__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( kUP.dbl )); // round up
 			z = x - 0.5;		              	// truncate x - 0.5
 			z = x - 0.5;		              	// truncate x - 0.5
 			argument.dbl = z + doubleToLong;
 			argument.dbl = z + doubleToLong;
-			asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+			__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 			return ( ( long ) argument.words.lo );
 			return ( ( long ) argument.words.lo );
 			}
 			}
 
 
@@ -259,9 +259,9 @@ long int roundtol ( double x )
 /*******************************************************************************
 /*******************************************************************************
 *     Rounded negative x is out of the range of a long.                        *
 *     Rounded negative x is out of the range of a long.                        *
 *******************************************************************************/
 *******************************************************************************/
-	asm ("mffs %0" : "=f" (OldEnvironment.dbl));
+	__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));
 	OldEnvironment.words.lo |= SET_INVALID;
 	OldEnvironment.words.lo |= SET_INVALID;
-	asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+	__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 	return ( LONG_MIN );			              	// return pinned result
 	return ( LONG_MIN );			              	// return pinned result
 	}
 	}
 
 
@@ -311,15 +311,15 @@ double modf ( double x, double *iptr )
 /*******************************************************************************
 /*******************************************************************************
 *     Is 1.0 < |x| < 2.0^52?                                                   *
 *     Is 1.0 < |x| < 2.0^52?                                                   *
 *******************************************************************************/
 *******************************************************************************/
-			asm ("mffs %0" : "=f" (OldEnvironment));	// save environment
+			__asm__ ("mffs %0" : "=f" (OldEnvironment));	// save environment
 			// round toward zero
 			// round toward zero
-			asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( TOWARDZERO.dbl ));
+			__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( TOWARDZERO.dbl ));
             if ( signBit == 0ul )                         // truncate to integer
             if ( signBit == 0ul )                         // truncate to integer
                   xtrunc = ( x + twoTo52 ) - twoTo52;
                   xtrunc = ( x + twoTo52 ) - twoTo52;
             else
             else
                   xtrunc = ( x - twoTo52 ) + twoTo52;
                   xtrunc = ( x - twoTo52 ) + twoTo52;
 		// restore caller's env
 		// restore caller's env
-		asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment ));
+		__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment ));
             *iptr = xtrunc;                               // store integral part
             *iptr = xtrunc;                               // store integral part
             if ( x != xtrunc )                            // nonzero fraction
             if ( x != xtrunc )                            // nonzero fraction
                   return ( x - xtrunc );
                   return ( x - xtrunc );
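
The mffs/mtfsf pairs in this file (and in the neighbouring s_ceil/s_floor/s_round/s_trunc) only save the FP environment, force a rounding mode, and restore it; in portable C99 the same dance looks roughly like this (sketch using only standard <fenv.h> calls):

    #include <fenv.h>

    static int with_round_toward_zero (void)
    {
        int old = fegetround ();
        if (fesetround (FE_TOWARDZERO) != 0)   /* switch rounding mode */
            return -1;
        /* ... the (x + 2^52) - 2^52 truncation trick would go here ... */
        fesetround (old);                      /* restore the caller's mode */
        return 0;
    }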

+ 2 - 2
libm/powerpc/classic/s_nearbyint.c

@@ -23,7 +23,7 @@ double nearbyint ( double x )
 
 
 	y = twoTo52;
 	y = twoTo52;
 
 
-	asm ("mffs %0" : "=f" (OldEnvironment));	/* get the environment */
+	__asm__ ("mffs %0" : "=f" (OldEnvironment));	/* get the environment */
 
 
       if ( fabs ( x ) >= y )                          /* huge case is exact */
       if ( fabs ( x ) >= y )                          /* huge case is exact */
             return x;
             return x;
@@ -32,7 +32,7 @@ double nearbyint ( double x )
       if ( y == 0.0 )                        /* zero results mirror sign of x */
       if ( y == 0.0 )                        /* zero results mirror sign of x */
             y = copysign ( y, x );
             y = copysign ( y, x );
 //	restore old flags
 //	restore old flags
-	asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment ));
+	__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment ));
       return ( y );
       return ( y );
 	}
 	}
 libm_hidden_def(nearbyint)
 libm_hidden_def(nearbyint)

+ 3 - 3
libm/powerpc/classic/s_round.c

@@ -51,7 +51,7 @@ double round ( double x )
 *     Is |x| < 1.0?                                                           *
 *     Is |x| < 1.0?                                                           *
 *******************************************************************************/
 *******************************************************************************/
                   {
                   {
-			asm ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
+			__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));	// get environment
                   if ( xHead < 0x3fe00000ul )
                   if ( xHead < 0x3fe00000ul )
 /*******************************************************************************
 /*******************************************************************************
 *     Is |x| < 0.5?                                                           *
 *     Is |x| < 0.5?                                                           *
@@ -59,7 +59,7 @@ double round ( double x )
                         {
                         {
                         if ( ( xHead | argument.words.lo ) != 0ul )
                         if ( ( xHead | argument.words.lo ) != 0ul )
                               OldEnvironment.words.lo |= 0x02000000ul;
                               OldEnvironment.words.lo |= 0x02000000ul;
-				asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+				__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
                         if ( target )
                         if ( target )
                               return ( 0.0 );
                               return ( 0.0 );
                         else
                         else
@@ -69,7 +69,7 @@ double round ( double x )
 *     Is 0.5 ≤ |x| < 1.0?                                                      *
 *     Is 0.5 ≤ |x| < 1.0?                                                      *
 *******************************************************************************/
 *******************************************************************************/
                   OldEnvironment.words.lo |= 0x02000000ul;
                   OldEnvironment.words.lo |= 0x02000000ul;
-			asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+			__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
                   if ( target )
                   if ( target )
                         return ( 1.0 );
                         return ( 1.0 );
                   else
                   else

+ 2 - 2
libm/powerpc/classic/s_trunc.c

@@ -51,9 +51,9 @@ double trunc ( double x )
 			{
 			{
 			if ( ( xhi | argument.words.lo ) != 0ul )
 			if ( ( xhi | argument.words.lo ) != 0ul )
 				{                             	// raise deserved INEXACT
 				{                             	// raise deserved INEXACT
-				asm ("mffs %0" : "=f" (OldEnvironment.dbl));
+				__asm__ ("mffs %0" : "=f" (OldEnvironment.dbl));
 				OldEnvironment.words.lo |= 0x02000000ul;
 				OldEnvironment.words.lo |= 0x02000000ul;
-				asm ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
+				__asm__ ("mtfsf 255,%0" : /*NULLOUT*/ : /*IN*/ "f" ( OldEnvironment.dbl ));
 				}
 				}
 			if ( target )	                  	// return properly signed zero
 			if ( target )	                  	// return properly signed zero
 				return ( 0.0 );
 				return ( 0.0 );

+ 2 - 2
libm/powerpc/e500/fpu/fenv_libc.h

@@ -28,11 +28,11 @@ extern int __feraiseexcept_internal (int __excepts);
 /* Equivalent to fegetenv, but returns a fenv_t instead of taking a
 /* Equivalent to fegetenv, but returns a fenv_t instead of taking a
    pointer.  */
    pointer.  */
 #define fegetenv_register() \
 #define fegetenv_register() \
-        ({ unsigned fscr; asm volatile ("mfspefscr %0" : "=r" (fscr)); fscr; })
+        ({ unsigned fscr; __asm__ __volatile__ ("mfspefscr %0" : "=r" (fscr)); fscr; })
 
 
 /* Equivalent to fesetenv, but takes a fenv_t instead of a pointer.  */
 /* Equivalent to fesetenv, but takes a fenv_t instead of a pointer.  */
 #define fesetenv_register(fscr) \
 #define fesetenv_register(fscr) \
-	({ asm volatile ("mtspefscr %0" : : "r" (fscr)); })
+	({ __asm__ __volatile__ ("mtspefscr %0" : : "r" (fscr)); })
 
 
 typedef union
 typedef union
 {
 {

+ 5 - 5
libm/powerpc/e500/spe-raise.c

@@ -33,33 +33,33 @@ __FERAISEEXCEPT_INTERNAL (int excepts)
     {
     {
       /* ?? Does not set sticky bit ?? */
       /* ?? Does not set sticky bit ?? */
       /* 0 / 0 */
       /* 0 / 0 */
-      asm volatile ("efsdiv %0,%0,%1" : : "r" (0), "r" (0));
+      __asm__ __volatile__ ("efsdiv %0,%0,%1" : : "r" (0), "r" (0));
     }
     }
 
 
   if ((FE_DIVBYZERO & excepts) != 0)
   if ((FE_DIVBYZERO & excepts) != 0)
     {
     {
       /* 1.0 / 0.0 */
       /* 1.0 / 0.0 */
-      asm volatile ("efsdiv %0,%0,%1" : : "r" (1.0F), "r" (0));
+      __asm__ __volatile__ ("efsdiv %0,%0,%1" : : "r" (1.0F), "r" (0));
     }
     }
 
 
   if ((FE_OVERFLOW & excepts) != 0)
   if ((FE_OVERFLOW & excepts) != 0)
     {
     {
       /* ?? Does not set sticky bit ?? */
       /* ?? Does not set sticky bit ?? */
       /* Largest normalized number plus itself.  */
       /* Largest normalized number plus itself.  */
-      asm volatile ("efsadd %0,%0,%1" : : "r" (0x7f7fffff), "r" (0x7f7fffff));
+      __asm__ __volatile__ ("efsadd %0,%0,%1" : : "r" (0x7f7fffff), "r" (0x7f7fffff));
     }
     }
 
 
   if ((FE_UNDERFLOW & excepts) != 0)
   if ((FE_UNDERFLOW & excepts) != 0)
     {
     {
       /* ?? Does not set sticky bit ?? */
       /* ?? Does not set sticky bit ?? */
       /* Smallest normalized number times itself.  */
       /* Smallest normalized number times itself.  */
-      asm volatile ("efsmul %0,%0,%1" : : "r" (0x800000), "r" (0x800000));
+      __asm__ __volatile__ ("efsmul %0,%0,%1" : : "r" (0x800000), "r" (0x800000));
     }
     }
 
 
   if ((FE_INEXACT & excepts) != 0)
   if ((FE_INEXACT & excepts) != 0)
     {
     {
       /* Smallest normalized minus 1.0 raises the inexact flag.  */
       /* Smallest normalized minus 1.0 raises the inexact flag.  */
-      asm volatile ("efssub %0,%0,%1" : : "r" (0x00800000), "r" (1.0F));
+      __asm__ __volatile__ ("efssub %0,%0,%1" : : "r" (0x00800000), "r" (1.0F));
     }
     }
 
 
   /* Success.  */
   /* Success.  */
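
Where only the standard behaviour is needed, the same exceptions can be raised through the portable interface instead of hand-built SPE arithmetic; a plain C99 sketch:

    #include <fenv.h>

    static int raise_overflow_inexact (void)
    {
        return feraiseexcept (FE_OVERFLOW | FE_INEXACT);   /* 0 on success */
    }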

+ 1 - 1
libpthread/linuxthreads.old/sysdeps/bfin/pt-machine.h

@@ -70,7 +70,7 @@ __compare_and_swap (long int *p, long int oldval, long int newval)
 #ifdef SHARED
 #ifdef SHARED
 # define PTHREAD_STATIC_FN_REQUIRE(name)
 # define PTHREAD_STATIC_FN_REQUIRE(name)
 #else
 #else
-# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " "_"#name);
+# define PTHREAD_STATIC_FN_REQUIRE(name) __asm__ (".globl " "_"#name);
 #endif
 #endif
 
 
 #endif /* pt-machine.h */
 #endif /* pt-machine.h */

+ 2 - 2
libpthread/linuxthreads.old/sysdeps/frv/pt-machine.h

@@ -35,7 +35,7 @@ PT_EI long int
 testandset (int *spinlock)
 testandset (int *spinlock)
 {
 {
   int i = 1;
   int i = 1;
-  asm ("swap%I0 %M0, %1" : "+m"(*(volatile int *)spinlock), "+r"(i));
+  __asm__ ("swap%I0 %M0, %1" : "+m"(*(volatile int *)spinlock), "+r"(i));
   return i;
   return i;
 }
 }
 
 
@@ -53,7 +53,7 @@ extern char __stacksize;
 #define WRITE_MEMORY_BARRIER() __asm__ __volatile__("membar" : : : "memory")
 #define WRITE_MEMORY_BARRIER() __asm__ __volatile__("membar" : : : "memory")
 
 
 /* Return the thread descriptor for the current thread.  */
 /* Return the thread descriptor for the current thread.  */
-register struct _pthread_descr_struct *THREAD_SELF asm ("gr29");
+register struct _pthread_descr_struct *THREAD_SELF __asm__ ("gr29");
 #define THREAD_SELF THREAD_SELF
 #define THREAD_SELF THREAD_SELF
 
 
 /* Initialize the thread-unique value.  */
 /* Initialize the thread-unique value.  */
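
testandset() above is an atomic exchange; with current GCC the same operation can be spelled with a builtin instead of the frv `swap' instruction (sketch, shown only for comparison):

    static inline long testandset_builtin (int *spinlock)
    {
        /* returns the previous value: 0 means we took the lock */
        return __atomic_exchange_n (spinlock, 1, __ATOMIC_ACQUIRE);
    }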

+ 4 - 4
libpthread/linuxthreads.old/sysdeps/i386/tls.h

@@ -104,7 +104,7 @@ typedef struct
     { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */,	      \
     { nr, (unsigned long int) (descr), 0xfffff /* 4GB in pages */,	      \
       1, 0, 0, 1, 0, 1, 0 };						      \
       1, 0, 0, 1, 0, 1, 0 };						      \
   int result;								      \
   int result;								      \
-  asm volatile (TLS_LOAD_EBX						      \
+  __asm__ __volatile__ (TLS_LOAD_EBX						      \
 		"int $0x80\n\t"						      \
 		"int $0x80\n\t"						      \
 		TLS_LOAD_EBX						      \
 		TLS_LOAD_EBX						      \
 		: "=a" (result)						      \
 		: "=a" (result)						      \
@@ -125,9 +125,9 @@ typedef struct
   int result;								      \
   int result;								      \
   if (secondcall)							      \
   if (secondcall)							      \
     ldt_entry.entry_number = ({ int _gs;				      \
     ldt_entry.entry_number = ({ int _gs;				      \
-				asm ("movw %%gs, %w0" : "=q" (_gs));	      \
+				__asm__ ("movw %%gs, %w0" : "=q" (_gs));	      \
 				(_gs & 0xffff) >> 3; });		      \
 				(_gs & 0xffff) >> 3; });		      \
-  asm volatile (TLS_LOAD_EBX						      \
+  __asm__ __volatile__ (TLS_LOAD_EBX						      \
 		"int $0x80\n\t"						      \
 		"int $0x80\n\t"						      \
 		TLS_LOAD_EBX						      \
 		TLS_LOAD_EBX						      \
 		: "=a" (result), "=m" (ldt_entry.entry_number)		      \
 		: "=a" (result), "=m" (ldt_entry.entry_number)		      \
@@ -167,7 +167,7 @@ typedef struct
     __gs = TLS_SETUP_GS_SEGMENT (_descr, secondcall);			      \
     __gs = TLS_SETUP_GS_SEGMENT (_descr, secondcall);			      \
     if (__builtin_expect (__gs, 7) != -1)				      \
     if (__builtin_expect (__gs, 7) != -1)				      \
       {									      \
       {									      \
-	asm ("movw %w0, %%gs" : : "q" (__gs));				      \
+	__asm__ ("movw %w0, %%gs" : : "q" (__gs));				      \
 	__gs = 0;							      \
 	__gs = 0;							      \
       }									      \
       }									      \
     __gs;								      \
     __gs;								      \

+ 6 - 6
libpthread/linuxthreads.old/sysdeps/i386/useldt.h

@@ -71,7 +71,7 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
       1, 0, 0, 1, 0, 1, 0 };						      \
       1, 0, 0, 1, 0, 1, 0 };						      \
   if (__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0)		      \
   if (__modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0)		      \
     abort ();								      \
     abort ();								      \
-  asm ("movw %w0, %%gs" : : "q" (nr * 8 + 7));				      \
+  __asm__ ("movw %w0, %%gs" : : "q" (nr * 8 + 7));				      \
 })
 })
 
 
 #ifdef __PIC__
 #ifdef __PIC__
@@ -97,21 +97,21 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
   int __gs;								      \
   int __gs;								      \
   if (DO_SET_THREAD_AREA_REUSE (nr))					      \
   if (DO_SET_THREAD_AREA_REUSE (nr))					      \
     {									      \
     {									      \
-      asm ("movw %%gs, %w0" : "=q" (__gs));				      \
+      __asm__ ("movw %%gs, %w0" : "=q" (__gs));				      \
       struct modify_ldt_ldt_s ldt_entry =				      \
       struct modify_ldt_ldt_s ldt_entry =				      \
 	{ (__gs & 0xffff) >> 3,						      \
 	{ (__gs & 0xffff) >> 3,						      \
 	  (unsigned long int) (descr), 0xfffff /* 4GB in pages */,	      \
 	  (unsigned long int) (descr), 0xfffff /* 4GB in pages */,	      \
 	  1, 0, 0, 1, 0, 1, 0 };					      \
 	  1, 0, 0, 1, 0, 1, 0 };					      \
 									      \
 									      \
       int __result;							      \
       int __result;							      \
-      __asm (USETLS_LOAD_EBX						      \
+      __asm__ (USETLS_LOAD_EBX						      \
 	     "movl %2, %%eax\n\t"					      \
 	     "movl %2, %%eax\n\t"					      \
 	     "int $0x80\n\t"						      \
 	     "int $0x80\n\t"						      \
 	     USETLS_LOAD_EBX						      \
 	     USETLS_LOAD_EBX						      \
 	     : "&a" (__result)						      \
 	     : "&a" (__result)						      \
 	     : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area));      \
 	     : USETLS_EBX_ARG (&ldt_entry), "i" (__NR_set_thread_area));      \
       if (__result == 0)						      \
       if (__result == 0)						      \
-	asm ("movw %w0, %%gs" :: "q" (__gs));				      \
+	__asm__ ("movw %w0, %%gs" :: "q" (__gs));				      \
       else								      \
       else								      \
 	__gs = -1;							      \
 	__gs = -1;							      \
     }									      \
     }									      \
@@ -122,7 +122,7 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
 	  (unsigned long int) (descr), 0xfffff /* 4GB in pages */,	      \
 	  (unsigned long int) (descr), 0xfffff /* 4GB in pages */,	      \
 	  1, 0, 0, 1, 0, 1, 0 };					      \
 	  1, 0, 0, 1, 0, 1, 0 };					      \
       int __result;							      \
       int __result;							      \
-      __asm (USETLS_LOAD_EBX						      \
+      __asm__ (USETLS_LOAD_EBX						      \
 	     "movl %2, %%eax\n\t"					      \
 	     "movl %2, %%eax\n\t"					      \
 	     "int $0x80\n\t"						      \
 	     "int $0x80\n\t"						      \
 	     USETLS_LOAD_EBX						      \
 	     USETLS_LOAD_EBX						      \
@@ -131,7 +131,7 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
       if (__result == 0)						      \
       if (__result == 0)						      \
 	{								      \
 	{								      \
 	  __gs = (ldt_entry.entry_number << 3) + 3;			      \
 	  __gs = (ldt_entry.entry_number << 3) + 3;			      \
-	  asm ("movw %w0, %%gs" : : "q" (__gs));			      \
+	  __asm__ ("movw %w0, %%gs" : : "q" (__gs));			      \
 	}								      \
 	}								      \
       else								      \
       else								      \
 	__gs = -1;							      \
 	__gs = -1;							      \

+ 1 - 1
libpthread/linuxthreads.old/sysdeps/sh/tls.h

@@ -100,7 +100,7 @@ typedef struct
     /* For now the thread descriptor is at the same address.  */	      \
     /* For now the thread descriptor is at the same address.  */	      \
     head->self = _descr;						      \
     head->self = _descr;						      \
 									      \
 									      \
-    asm ("ldc %0,gbr" : : "r" (_descr));				      \
+    __asm__ ("ldc %0,gbr" : : "r" (_descr));				      \
 									      \
 									      \
     0;									      \
     0;									      \
   })
   })

+ 1 - 1
libpthread/linuxthreads.old/sysdeps/x86_64/tls.h

@@ -108,7 +108,7 @@ typedef struct
     /* For now the thread descriptor is at the same address.  */	      \
     /* For now the thread descriptor is at the same address.  */	      \
     head->self = _descr;						      \
     head->self = _descr;						      \
 									      \
 									      \
-    asm volatile ("syscall"						      \
+    __asm__ __volatile__ ("syscall"						      \
 		  : "=a" (_result)					      \
 		  : "=a" (_result)					      \
 		  : "0" ((unsigned long int) __NR_arch_prctl),		      \
 		  : "0" ((unsigned long int) __NR_arch_prctl),		      \
 		    "D" ((unsigned long int) ARCH_SET_FS),		      \
 		    "D" ((unsigned long int) ARCH_SET_FS),		      \

+ 1 - 1
test/math/libm-test.inc

@@ -310,7 +310,7 @@ fpstack_test (const char *test_name)
   static int old_stack;
   static int old_stack;
   int sw;
   int sw;
 
 
-  asm ("fnstsw" : "=a" (sw));
+  __asm__ ("fnstsw" : "=a" (sw));
   sw >>= 11;
   sw >>= 11;
   sw &= 7;
   sw &= 7;