syscalls.h

/*
 * Copyright (C) 2016 Andes Technology, Inc.
 * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 */

#ifndef _BITS_SYSCALLS_H
#define _BITS_SYSCALLS_H

#ifndef _SYSCALL_H
# error "Never use <bits/syscalls.h> directly; include <sys/syscall.h> instead."
#endif

#ifndef __ASSEMBLER__

#include <errno.h>

/* The kernel reports failure by returning a negated errno value in $r0; any
   result in [-4095, -1] (i.e. >= 0xfffff001 when viewed as unsigned) is an
   error, and negating it recovers the errno to set.  */
#undef INTERNAL_SYSCALL_ERROR_P
#define INTERNAL_SYSCALL_ERROR_P(val, err) ((unsigned int) (val) >= 0xfffff001u)

#undef INTERNAL_SYSCALL_ERRNO
#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))

#define X(x) #x
#define Y(x) X(x)
#define __issue_syscall(syscall_name) \
" syscall " Y(syscall_name) "; \n"

#define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
(__extension__ \
  ({ \
        register long __result __asm__("$r0"); \
        register long _sys_num __asm__("$r8"); \
 \
        LOAD_ARGS_##nr (name, args) \
        _sys_num = (name); \
 \
        __asm__ volatile ( \
                __issue_syscall (name) \
                : "=r" (__result) \
                : "r"(_sys_num) ASM_ARGS_##nr \
                : "$lp", "memory"); \
        __result; \
   }) \
)
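
/*
 * Usage sketch: roughly how generic INLINE_SYSCALL()-style wrappers are
 * expected to drive the macros above.  The `fd' argument is illustrative,
 * and the `err' parameter is unused by this port's error macros:
 *
 *   long ret = INTERNAL_SYSCALL_NCS(__NR_close, err, 1, fd);
 *   if (INTERNAL_SYSCALL_ERROR_P(ret, err)) {
 *           __set_errno(INTERNAL_SYSCALL_ERRNO(ret, err));
 *           ret = -1;
 *   }
 */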

/* Macros for setting up inline __asm__ input regs */
#define ASM_ARGS_0
#define ASM_ARGS_1 ASM_ARGS_0, "r" (__result)
#define ASM_ARGS_2 ASM_ARGS_1, "r" (_arg2)
#define ASM_ARGS_3 ASM_ARGS_2, "r" (_arg3)
#define ASM_ARGS_4 ASM_ARGS_3, "r" (_arg4)
#define ASM_ARGS_5 ASM_ARGS_4, "r" (_arg5)
#define ASM_ARGS_6 ASM_ARGS_5, "r" (_arg6)
#define ASM_ARGS_7 ASM_ARGS_6, "r" (_arg7)
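
/*
 * ASM_ARGS_1 passes __result as an input operand because LOAD_ARGS_1 places
 * the first argument in $r0, the same register that carries the result.
 */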

/* Macros for converting sys-call wrapper args into sys call args */
#define LOAD_ARGS_0(name, arg) \
        _sys_num = (long) (name); \

#define LOAD_ARGS_1(name, arg1) \
        __result = (long) (arg1); \
        LOAD_ARGS_0 (name, arg1)

/*
 * Note that the use of _tmpX might look superfluous; however, it is needed
 * to ensure that the register variables are not clobbered if an arg happens
 * to be a function call itself, e.g. sched_setaffinity() calling getpid()
 * for arg2.
 *
 * The specific order of the recursive calls also matters: it separates the
 * evaluation of the tmp args (the function-call case described above) from
 * the assignment of the register variables (see the expansion sketch after
 * LOAD_ARGS_7 below).
 */
#define LOAD_ARGS_2(name, arg1, arg2) \
        long _tmp2 = (long) (arg2); \
        LOAD_ARGS_1 (name, arg1) \
        register long _arg2 __asm__ ("$r1") = _tmp2;

#define LOAD_ARGS_3(name, arg1, arg2, arg3) \
        long _tmp3 = (long) (arg3); \
        LOAD_ARGS_2 (name, arg1, arg2) \
        register long _arg3 __asm__ ("$r2") = _tmp3;

#define LOAD_ARGS_4(name, arg1, arg2, arg3, arg4) \
        long _tmp4 = (long) (arg4); \
        LOAD_ARGS_3 (name, arg1, arg2, arg3) \
        register long _arg4 __asm__ ("$r3") = _tmp4;

#define LOAD_ARGS_5(name, arg1, arg2, arg3, arg4, arg5) \
        long _tmp5 = (long) (arg5); \
        LOAD_ARGS_4 (name, arg1, arg2, arg3, arg4) \
        register long _arg5 __asm__ ("$r4") = _tmp5;

#define LOAD_ARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
        long _tmp6 = (long) (arg6); \
        LOAD_ARGS_5 (name, arg1, arg2, arg3, arg4, arg5) \
        register long _arg6 __asm__ ("$r5") = _tmp6;

#define LOAD_ARGS_7(name, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
        long _tmp7 = (long) (arg7); \
        LOAD_ARGS_6 (name, arg1, arg2, arg3, arg4, arg5, arg6) \
        register long _arg7 __asm__ ("$r6") = _tmp7;
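
/*
 * Expansion sketch (illustrative): LOAD_ARGS_3(name, a, b, c) unfolds to
 *
 *   long _tmp3 = (long) (c);
 *   long _tmp2 = (long) (b);
 *   __result = (long) (a);
 *   _sys_num = (long) (name);
 *   register long _arg2 __asm__ ("$r1") = _tmp2;
 *   register long _arg3 __asm__ ("$r2") = _tmp3;
 *
 * i.e. every argument is evaluated into an ordinary temporary before any of
 * the fixed-register variables is assigned.
 */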

#endif /* ! __ASSEMBLER__ */
#endif /* _BITS_SYSCALLS_H */