/* sysdep.h */
/* Copyright (C) 1992,1997-2003,2004,2005,2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
  15. #ifndef _LINUX_POWERPC_SYSDEP_H
  16. #define _LINUX_POWERPC_SYSDEP_H 1
  17. #include <sysdeps/unix/powerpc/sysdep.h>
  18. #include <tls.h>
  19. /* Some systen calls got renamed over time, but retained the same semantics.
  20. Handle them here so they can be catched by both C and assembler stubs in
  21. glibc. */
  22. #ifdef __NR_pread64
  23. # ifdef __NR_pread
  24. # error "__NR_pread and __NR_pread64 both defined???"
  25. # endif
  26. # define __NR_pread __NR_pread64
  27. #endif
  28. #ifdef __NR_pwrite64
  29. # ifdef __NR_pwrite
  30. # error "__NR_pwrite and __NR_pwrite64 both defined???"
  31. # endif
  32. # define __NR_pwrite __NR_pwrite64
  33. #endif
  34. /* For Linux we can use the system call table in the header file
  35. /usr/include/asm/unistd.h
  36. of the kernel. But these symbols do not follow the SYS_* syntax
  37. so we have to redefine the `SYS_ify' macro here. */
  38. #undef SYS_ify
  39. #ifdef __STDC__
  40. # define SYS_ify(syscall_name) __NR_##syscall_name
  41. #else
  42. # define SYS_ify(syscall_name) __NR_/**/syscall_name
  43. #endif
  44. #ifndef __ASSEMBLER__
  45. # include <errno.h>
  46. # ifdef SHARED
  47. # define INLINE_VSYSCALL(name, nr, args...) \
  48. ({ \
  49. __label__ out; \
  50. __label__ iserr; \
  51. INTERNAL_SYSCALL_DECL (sc_err); \
  52. long int sc_ret; \
  53. \
  54. if (__vdso_##name != NULL) \
  55. { \
  56. sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args); \
  57. if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
  58. goto out; \
  59. if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
  60. goto iserr; \
  61. } \
  62. \
  63. sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
  64. if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
  65. { \
  66. iserr: \
  67. __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
  68. sc_ret = -1L; \
  69. } \
  70. out: \
  71. sc_ret; \
  72. })
  73. # else
  74. # define INLINE_VSYSCALL(name, nr, args...) \
  75. INLINE_SYSCALL (name, nr, ##args)
  76. # endif
  77. # ifdef SHARED
  78. # define INTERNAL_VSYSCALL(name, err, nr, args...) \
  79. ({ \
  80. __label__ out; \
  81. long int v_ret; \
  82. \
  83. if (__vdso_##name != NULL) \
  84. { \
  85. v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
  86. if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
  87. || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
  88. goto out; \
  89. } \
  90. v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
  91. out: \
  92. v_ret; \
  93. })
  94. # else
  95. # define INTERNAL_VSYSCALL(name, err, nr, args...) \
  96. INTERNAL_SYSCALL (name, err, nr, ##args)
  97. # endif
  98. # define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...) \
  99. ({ \
  100. long int sc_ret = ENOSYS; \
  101. \
  102. if (__vdso_##name != NULL) \
  103. sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
  104. else \
  105. err = 1 << 28; \
  106. sc_ret; \
  107. })
  108. /* List of system calls which are supported as vsyscalls. */
  109. # define HAVE_CLOCK_GETRES_VSYSCALL 1
  110. # define HAVE_CLOCK_GETTIME_VSYSCALL 1
  111. /* Define a macro which expands inline into the wrapper code for a VDSO
  112. call. This use is for internal calls that do not need to handle errors
  113. normally. It will never touch errno.
  114. On powerpc a system call basically clobbers the same registers like a
  115. function call, with the exception of LR (which is needed for the
  116. "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
  117. an error return status). */
  118. # define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
  119. ({ \
  120. register void *r0 __asm__ ("r0"); \
  121. register long int r3 __asm__ ("r3"); \
  122. register long int r4 __asm__ ("r4"); \
  123. register long int r5 __asm__ ("r5"); \
  124. register long int r6 __asm__ ("r6"); \
  125. register long int r7 __asm__ ("r7"); \
  126. register long int r8 __asm__ ("r8"); \
  127. register long int r9 __asm__ ("r9"); \
  128. register long int r10 __asm__ ("r10"); \
  129. register long int r11 __asm__ ("r11"); \
  130. register long int r12 __asm__ ("r12"); \
  131. LOADARGS_##nr (funcptr, args); \
  132. __asm__ __volatile__ \
  133. ("mtctr %0\n\t" \
  134. "bctrl\n\t" \
  135. "mfcr %0" \
  136. : "=&r" (r0), \
  137. "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
  138. "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
  139. : ASM_INPUT_##nr \
  140. : "cr0", "ctr", "lr", "memory"); \
  141. err = (long int) r0; \
  142. (int) r3; \
  143. })
  144. # undef INLINE_SYSCALL
  145. # define INLINE_SYSCALL(name, nr, args...) \
  146. ({ \
  147. INTERNAL_SYSCALL_DECL (sc_err); \
  148. long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
  149. if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
  150. { \
  151. __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
  152. sc_ret = -1L; \
  153. } \
  154. sc_ret; \
  155. })
  156. /* Define a macro which expands inline into the wrapper code for a system
  157. call. This use is for internal calls that do not need to handle errors
  158. normally. It will never touch errno.
  159. On powerpc a system call basically clobbers the same registers like a
  160. function call, with the exception of LR (which is needed for the
  161. "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
  162. an error return status). */
  163. # undef INTERNAL_SYSCALL_DECL
  164. # define INTERNAL_SYSCALL_DECL(err) long int err
  165. # define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
  166. ({ \
  167. register long int r0 __asm__ ("r0"); \
  168. register long int r3 __asm__ ("r3"); \
  169. register long int r4 __asm__ ("r4"); \
  170. register long int r5 __asm__ ("r5"); \
  171. register long int r6 __asm__ ("r6"); \
  172. register long int r7 __asm__ ("r7"); \
  173. register long int r8 __asm__ ("r8"); \
  174. register long int r9 __asm__ ("r9"); \
  175. register long int r10 __asm__ ("r10"); \
  176. register long int r11 __asm__ ("r11"); \
  177. register long int r12 __asm__ ("r12"); \
  178. LOADARGS_##nr(name, args); \
  179. __asm__ __volatile__ \
  180. ("sc \n\t" \
  181. "mfcr %0" \
  182. : "=&r" (r0), \
  183. "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
  184. "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
  185. : ASM_INPUT_##nr \
  186. : "cr0", "ctr", "memory"); \
  187. err = r0; \
  188. (int) r3; \
  189. })
  190. # undef INTERNAL_SYSCALL
  191. # define INTERNAL_SYSCALL(name, err, nr, args...) \
  192. INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)
  193. # undef INTERNAL_SYSCALL_ERROR_P
  194. # define INTERNAL_SYSCALL_ERROR_P(val, err) \
  195. ((void) (val), __builtin_expect ((err) & (1 << 28), 0))
  196. # undef INTERNAL_SYSCALL_ERRNO
  197. # define INTERNAL_SYSCALL_ERRNO(val, err) (val)
  198. # define LOADARGS_0(name, dummy) \
  199. r0 = name
  200. # define LOADARGS_1(name, __arg1) \
  201. long int arg1 = (long int) (__arg1); \
  202. LOADARGS_0(name, 0); \
  203. extern void __illegally_sized_syscall_arg1 (void); \
  204. if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
  205. __illegally_sized_syscall_arg1 (); \
  206. r3 = arg1
  207. # define LOADARGS_2(name, __arg1, __arg2) \
  208. long int arg2 = (long int) (__arg2); \
  209. LOADARGS_1(name, __arg1); \
  210. extern void __illegally_sized_syscall_arg2 (void); \
  211. if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
  212. __illegally_sized_syscall_arg2 (); \
  213. r4 = arg2
  214. # define LOADARGS_3(name, __arg1, __arg2, __arg3) \
  215. long int arg3 = (long int) (__arg3); \
  216. LOADARGS_2(name, __arg1, __arg2); \
  217. extern void __illegally_sized_syscall_arg3 (void); \
  218. if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
  219. __illegally_sized_syscall_arg3 (); \
  220. r5 = arg3
  221. # define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
  222. long int arg4 = (long int) (__arg4); \
  223. LOADARGS_3(name, __arg1, __arg2, __arg3); \
  224. extern void __illegally_sized_syscall_arg4 (void); \
  225. if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
  226. __illegally_sized_syscall_arg4 (); \
  227. r6 = arg4
  228. # define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
  229. long int arg5 = (long int) (__arg5); \
  230. LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
  231. extern void __illegally_sized_syscall_arg5 (void); \
  232. if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
  233. __illegally_sized_syscall_arg5 (); \
  234. r7 = arg5
  235. # define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
  236. long int arg6 = (long int) (__arg6); \
  237. LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
  238. extern void __illegally_sized_syscall_arg6 (void); \
  239. if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
  240. __illegally_sized_syscall_arg6 (); \
  241. r8 = arg6
  242. # define ASM_INPUT_0 "0" (r0)
  243. # define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
  244. # define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
  245. # define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
  246. # define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
  247. # define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
  248. # define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
  249. #endif /* __ASSEMBLER__ */
  250. /* Pointer mangling support. */
  251. #if defined NOT_IN_libc && defined IS_IN_rtld
  252. /* We cannot use the thread descriptor because in ld.so we use setjmp
  253. earlier than the descriptor is initialized. */
  254. #else
  255. # ifdef __ASSEMBLER__
  256. # define PTR_MANGLE(reg, tmpreg) \
  257. lwz tmpreg,POINTER_GUARD(r2); \
  258. xor reg,tmpreg,reg
  259. # define PTR_MANGLE2(reg, tmpreg) \
  260. xor reg,tmpreg,reg
  261. # define PTR_MANGLE3(destreg, reg, tmpreg) \
  262. lwz tmpreg,POINTER_GUARD(r2); \
  263. xor destreg,tmpreg,reg
  264. # define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
  265. # define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
  266. # define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
  267. # else
  268. # define PTR_MANGLE(var) \
  269. (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
  270. # define PTR_DEMANGLE(var) PTR_MANGLE (var)
  271. # endif
  272. #endif
  273. #endif /* linux/powerpc/powerpc32/sysdep.h */