//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/aarch64.
//
//===----------------------------------------------------------------------===//
// Expand a symbolic syscall name to its Linux syscall-number macro,
// e.g. SYSCALL(read) -> __NR_read.
#define SYSCALL(name) __NR_ ## name

// 0-argument syscall: the syscall number goes in x8, "svc 0" traps into the
// kernel, and the kernel's return value comes back in x0.
static uptr __internal_syscall(u64 nr) {
  register u64 x8 asm("x8") = nr;  // syscall number
  register u64 x0 asm("x0");       // receives the kernel's return value
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8)
               // "memory": the kernel may read/write user memory as a side
               // effect of the syscall, so cached values must be flushed.
               : "memory", "cc");
  return x0;
}
// The parentheses around the function name suppress function-like macro
// expansion, so this name never collides with the dispatcher macros below.
#define __internal_syscall0(n) \
  (__internal_syscall)(n)
// 1-argument syscall: number in x8, arg1 in x0; result returned in x0.
static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  asm volatile("svc 0"
               : "=r"(x0)
               // "0"(x0) ties the input to output operand 0: x0 is both the
               // first syscall argument and the return-value register.
               : "r"(x8), "0"(x0)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall1(n, a1) \
  (__internal_syscall)(n, (u64)(a1))
// 2-argument syscall: number in x8, args in x0-x1; result in x0.
// NOTE(review): the u64/long mix in the parameter types is historical; every
// argument ends up as a 64-bit register value regardless.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))
// 3-argument syscall: number in x8, args in x0-x2; result in x0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
// 4-argument syscall: number in x8, args in x0-x3; result in x0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  register u64 x3 asm("x3") = arg4;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
// 5-argument syscall: number in x8, args in x0-x4; result in x0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4, long arg5) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  register u64 x3 asm("x3") = arg4;
  register u64 x4 asm("x4") = arg5;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))
// 6-argument syscall (the aarch64 maximum): number in x8, args in x0-x5;
// result in x0.
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4, long arg5, long arg6) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  register u64 x3 asm("x3") = arg4;
  register u64 x4 asm("x4") = arg5;
  register u64 x5 asm("x5") = arg6;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))
// Argument-counting dispatch: internal_syscall(nr, a1, ..., ak) expands to
// __internal_syscallk(nr, a1, ..., ak) by counting the variadic arguments.
// __SYSCALL_NARGS_X selects its 9th argument; padding the call with the
// descending list 7..0 makes that 9th argument equal to (number of caller
// arguments - 1), i.e. the syscall's own argument count.
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
// Two-level concat so the NARGS result is expanded before pasting.
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
// Public entry point: internal_syscall(SYSCALL(name), args...).
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
  118. // Helper function used to avoid cobbler errno.
  119. bool internal_iserror(uptr retval, int *rverrno) {
  120. if (retval >= (uptr)-4095) {
  121. if (rverrno)
  122. *rverrno = -retval;
  123. return true;
  124. }
  125. return false;
  126. }