linux-atomic.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367
  1. /* Linux-specific atomic operations for PA Linux.
  2. Copyright (C) 2008-2022 Free Software Foundation, Inc.
  3. Based on code contributed by CodeSourcery for ARM EABI Linux.
  4. Modifications for PA Linux by Helge Deller <deller@gmx.de>
  5. This file is part of GCC.
  6. GCC is free software; you can redistribute it and/or modify it under
  7. the terms of the GNU General Public License as published by the Free
  8. Software Foundation; either version 3, or (at your option) any later
  9. version.
  10. GCC is distributed in the hope that it will be useful, but WITHOUT ANY
  11. WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  13. for more details.
  14. Under Section 7 of GPL version 3, you are granted additional
  15. permissions described in the GCC Runtime Library Exception, version
  16. 3.1, as published by the Free Software Foundation.
  17. You should have received a copy of the GNU General Public License and
  18. a copy of the GCC Runtime Library Exception along with this program;
  19. see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  20. <http://www.gnu.org/licenses/>. */
/* Errno values returned by the kernel light-weight syscall (LWS)
   helpers.  Defined locally (PA-RISC Linux values) so this file stays
   freestanding and does not pull in <errno.h>.  */
#define EFAULT  14
#define EBUSY   16
#define ENOSYS  251

/* EFAULT, negated, as a string for splicing into inline assembly.  */
#define _ASM_EFAULT "-14"

/* Minimal fixed-width unsigned types; <stdint.h> is deliberately not
   included.  u64 must map onto a 64-bit type in both ILP32 and LP64
   userspace, hence the __LP64__ switch.  */
typedef unsigned char u8;
typedef short unsigned int u16;
#ifdef __LP64__
typedef long unsigned int u64;
#else
typedef long long unsigned int u64;
#endif
  32. /* PA-RISC 2.0 supports out-of-order execution for loads and stores.
Thus, we need to synchronize memory accesses.  For more info, see:
  34. "Advanced Performance Features of the 64-bit PA-8000" by Doug Hunt.
  35. We implement byte, short and int versions of each atomic operation
  36. using the kernel helper defined below. There is no support for
  37. 64-bit operations yet. */
/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).
   sizeof(long) distinguishes ILP32 from LP64 at compile time, so the
   conditional folds to a constant.  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)
/* Kernel helper for compare-and-exchange a 32-bit value.

   Calls the PA-RISC Linux LWS gateway at offset 0xb0 in space sr2.
   Argument registers per the LWS ABI: r20 = operation number
   (LWS_CAS), r26 = address, r25 = expected old value, r24 = new
   value.  The kernel returns the old memory value in r28 and an
   errno in r21.

   Returns 0 on success (the store happened), -EBUSY when the word
   did not contain OLDVAL, or the kernel's errno otherwise.  */
static inline long
__kernel_cmpxchg (volatile void *mem, int oldval, int newval)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			/* The ldi executes in the branch delay slot,
			   loading the LWS operation number into r20.  */
			"ldi	%2, %%r20		\n\t"
			/* If the kernel returned -EFAULT, the cmpiclr,<>
			   lets the privileged iitlbp execute so a fault is
			   raised in userspace; otherwise it is nullified.  */
			"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
			"iitlbp %%r0,(%%sr0, %%r0)	\n\t"
	: "=r" (lws_ret), "=r" (lws_errno)
	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    return -EBUSY;

  return lws_errno;
}
/* Kernel helper for a variable-width compare-and-exchange (LWS
   operation number 2).  MEM is the location to update; OLDVAL and
   NEWVAL point to the expected and replacement values; VAL_SIZE is
   the size index passed to the kernel (as used in this file:
   0 = 1 byte, 1 = 2 bytes, 3 = 8 bytes).

   Returns 0 on success, -EBUSY when *MEM did not match *OLDVAL, or
   the kernel's errno otherwise.  */
static inline long
__kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
		   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			/* The operation number (2) is loaded into r20 in
			   the branch delay slot.  */
			"ldi	%6, %%r20		\n\t"
			/* On -EFAULT, execute the privileged iitlbp to
			   raise a fault; otherwise cmpiclr,<> nullifies
			   it.  */
			"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
			"iitlbp %%r0,(%%sr0, %%r0)	\n\t"
	/* The argument registers are "+r": the kernel may clobber
	   them across the call.  */
	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
	: "i" (2)
	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );

  /* If the kernel LWS call is successful, lws_ret contains 0.  */
  if (__builtin_expect (lws_ret == 0, 1))
    return 0;

  /* If the kernel LWS call fails with no error, return -EBUSY */
  if (__builtin_expect (!lws_errno, 0))
    return -EBUSY;

  return lws_errno;
}
/* All the __sync_* entry points below are libgcc-internal; hide them
   from the dynamic symbol table.  */
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks */
/* NOTE(review): the INVERT_MASK_*/MASK_* defines are not referenced in
   the visible portion of this file — possibly left over from an older
   shift-and-mask implementation; confirm before removing.  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu
/* Generate __sync_fetch_and_<OP>_<WIDTH> for 1-, 2- and 8-byte types
   on top of __kernel_cmpxchg2.  INDEX is the kernel size index
   (0 = 1 byte, 1 = 2 bytes, 3 = 8 bytes).  The operation is
   NEWVAL = PFX_OP (OLD INF_OP VAL), retried until the CAS succeeds;
   the value held before the update is returned.  */
#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val)	\
  {									\
    TYPE tmp, newval;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED);	\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
/* Instantiate the fetch-and-op family for 8-, 2- and 1-byte operands.  */
FETCH_AND_OP_2 (add,   , +, u64, 8, 3)
FETCH_AND_OP_2 (sub,   , -, u64, 8, 3)
FETCH_AND_OP_2 (or,    , |, u64, 8, 3)
FETCH_AND_OP_2 (and,   , &, u64, 8, 3)
FETCH_AND_OP_2 (xor,   , ^, u64, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, u64, 8, 3)

FETCH_AND_OP_2 (add,   , +, u16, 2, 1)
FETCH_AND_OP_2 (sub,   , -, u16, 2, 1)
FETCH_AND_OP_2 (or,    , |, u16, 2, 1)
FETCH_AND_OP_2 (and,   , &, u16, 2, 1)
FETCH_AND_OP_2 (xor,   , ^, u16, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, u16, 2, 1)

FETCH_AND_OP_2 (add,   , +, u8, 1, 0)
FETCH_AND_OP_2 (sub,   , -, u8, 1, 0)
FETCH_AND_OP_2 (or,    , |, u8, 1, 0)
FETCH_AND_OP_2 (and,   , &, u8, 1, 0)
FETCH_AND_OP_2 (xor,   , ^, u8, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, u8, 1, 0)
/* Generate __sync_<OP>_and_fetch_<WIDTH> for 1-, 2- and 8-byte types.
   Same CAS loop as FETCH_AND_OP_2, but returns the value AFTER the
   operation (recomputed from the old value that won the CAS).  */
#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_##OP##_and_fetch_##WIDTH (volatile void *ptr, TYPE val)	\
  {									\
    TYPE tmp, newval;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED);	\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
/* Instantiate the op-and-fetch family for 8-, 2- and 1-byte operands.  */
OP_AND_FETCH_2 (add,   , +, u64, 8, 3)
OP_AND_FETCH_2 (sub,   , -, u64, 8, 3)
OP_AND_FETCH_2 (or,    , |, u64, 8, 3)
OP_AND_FETCH_2 (and,   , &, u64, 8, 3)
OP_AND_FETCH_2 (xor,   , ^, u64, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, u64, 8, 3)

OP_AND_FETCH_2 (add,   , +, u16, 2, 1)
OP_AND_FETCH_2 (sub,   , -, u16, 2, 1)
OP_AND_FETCH_2 (or,    , |, u16, 2, 1)
OP_AND_FETCH_2 (and,   , &, u16, 2, 1)
OP_AND_FETCH_2 (xor,   , ^, u16, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, u16, 2, 1)

OP_AND_FETCH_2 (add,   , +, u8, 1, 0)
OP_AND_FETCH_2 (sub,   , -, u8, 1, 0)
OP_AND_FETCH_2 (or,    , |, u8, 1, 0)
OP_AND_FETCH_2 (and,   , &, u8, 1, 0)
OP_AND_FETCH_2 (xor,   , ^, u8, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, u8, 1, 0)
/* Generate __sync_fetch_and_<OP>_4.  Word-sized operands use the
   32-bit __kernel_cmpxchg helper directly (values by value, no
   pointers).  Returns the value held before the operation.  */
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  unsigned int HIDDEN							\
  __sync_fetch_and_##OP##_4 (volatile void *ptr, unsigned int val)	\
  {									\
    unsigned int tmp;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile unsigned int *)ptr,		\
			     __ATOMIC_RELAXED);				\
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
/* Instantiate the word-sized fetch-and-op family.  */
FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)
/* Generate __sync_<OP>_and_fetch_4.  Same CAS loop as
   FETCH_AND_OP_WORD, but returns the value AFTER the operation.  */
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  unsigned int HIDDEN							\
  __sync_##OP##_and_fetch_4 (volatile void *ptr, unsigned int val)	\
  {									\
    unsigned int tmp;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n ((volatile unsigned int *)ptr,		\
			     __ATOMIC_RELAXED);				\
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
/* Instantiate the word-sized op-and-fetch family.  */
OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)
/* NOTE(review): this typedef appears unused in the visible code (the
   generated functions below use _Bool directly) and would clash with
   <stdbool.h>/C23 `bool` — confirm before removing.  */
typedef unsigned char bool;

/* Generate __sync_val_compare_and_swap_<WIDTH> and
   __sync_bool_compare_and_swap_<WIDTH> for 1-, 2- and 8-byte types.
   The val variant returns the value actually observed (only equal to
   OLDVAL when the swap happened); the bool variant returns whether
   one __kernel_cmpxchg2 attempt succeeded.  */
#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX)				\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (volatile void *ptr, TYPE oldval,	\
				       TYPE newval)			\
  {									\
    TYPE actual_oldval;							\
    long fail;								\
									\
    while (1)								\
      {									\
	/* Cheap early exit: no kernel call when the current value	\
	   already differs from OLDVAL.  */				\
	actual_oldval = __atomic_load_n ((volatile TYPE *)ptr,		\
					 __ATOMIC_RELAXED);		\
									\
	if (__builtin_expect (oldval != actual_oldval, 0))		\
	  return actual_oldval;						\
									\
	fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX);	\
									\
	if (__builtin_expect (!fail, 1))				\
	  return actual_oldval;						\
      }									\
  }									\
									\
  _Bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (volatile void *ptr,		\
					TYPE oldval, TYPE newval)	\
  {									\
    long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX);	\
    return (failure == 0);						\
  }
/* Instantiate compare-and-swap for 8-, 2- and 1-byte operands.  */
COMPARE_AND_SWAP_2 (u64, 8, 3)
COMPARE_AND_SWAP_2 (u16, 2, 1)
COMPARE_AND_SWAP_2 (u8, 1, 0)
  239. unsigned int HIDDEN
  240. __sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
  241. unsigned int newval)
  242. {
  243. long fail;
  244. unsigned int actual_oldval;
  245. while (1)
  246. {
  247. actual_oldval = __atomic_load_n ((volatile unsigned int *)ptr,
  248. __ATOMIC_RELAXED);
  249. if (__builtin_expect (oldval != actual_oldval, 0))
  250. return actual_oldval;
  251. fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
  252. if (__builtin_expect (!fail, 1))
  253. return actual_oldval;
  254. }
  255. }
  256. _Bool HIDDEN
  257. __sync_bool_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
  258. unsigned int newval)
  259. {
  260. long failure = __kernel_cmpxchg (ptr, oldval, newval);
  261. return (failure == 0);
  262. }
/* Generate __sync_lock_test_and_set_<WIDTH> for 1-, 2- and 8-byte
   types: atomically store VAL and return the previous contents,
   retrying the load/CAS pair until it succeeds.  */
#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX)			\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (volatile void *ptr, TYPE val)	\
  {									\
    TYPE oldval;							\
    long failure;							\
									\
    do {								\
      oldval = __atomic_load_n ((volatile TYPE *)ptr,			\
				__ATOMIC_RELAXED);			\
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);		\
    } while (failure != 0);						\
									\
    return oldval;							\
  }
/* Instantiate test-and-set for 8-, 2- and 1-byte operands.  */
SYNC_LOCK_TEST_AND_SET_2 (u64, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (u16, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (u8, 1, 0)
  281. unsigned int HIDDEN
  282. __sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
  283. {
  284. long failure;
  285. unsigned int oldval;
  286. do {
  287. oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
  288. failure = __kernel_cmpxchg (ptr, oldval, val);
  289. } while (failure != 0);
  290. return oldval;
  291. }
/* Generate __sync_lock_release_<WIDTH> for 1-, 2- and 8-byte types:
   atomically store zero, retrying the load/CAS pair until it
   succeeds.  */
#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH, INDEX)				\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (volatile void *ptr)			\
  {									\
    TYPE oldval, val = 0;						\
    long failure;							\
									\
    do {								\
      oldval = __atomic_load_n ((volatile TYPE *)ptr,			\
				__ATOMIC_RELAXED);			\
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);		\
    } while (failure != 0);						\
  }
/* Instantiate lock-release for 8-, 2- and 1-byte operands.  */
SYNC_LOCK_RELEASE_1 (u64, 8, 3)
SYNC_LOCK_RELEASE_1 (u16, 2, 1)
SYNC_LOCK_RELEASE_1 (u8, 1, 0)
  308. void HIDDEN
  309. __sync_lock_release_4 (volatile void *ptr)
  310. {
  311. long failure;
  312. unsigned int oldval;
  313. do {
  314. oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
  315. failure = __kernel_cmpxchg (ptr, oldval, 0);
  316. } while (failure != 0);
  317. }