//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" char _InterlockedExchange8(char volatile *Addend, char Value);
#pragma intrinsic(_InterlockedExchange8)
extern "C" short _InterlockedExchange16(short volatile *Addend, short Value);
#pragma intrinsic(_InterlockedExchange16)
extern "C" long _InterlockedExchange(long volatile *Addend, long Value);
#pragma intrinsic(_InterlockedExchange)
extern "C" long _InterlockedExchangeAdd(long volatile *Addend, long Value);
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" char _InterlockedCompareExchange8(char volatile *Destination,
                                             char Exchange, char Comparand);
#pragma intrinsic(_InterlockedCompareExchange8)
extern "C" short _InterlockedCompareExchange16(short volatile *Destination,
                                               short Exchange, short Comparand);
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C" long long _InterlockedCompareExchange64(
    long long volatile *Destination, long long Exchange, long long Comparand);
#pragma intrinsic(_InterlockedCompareExchange64)
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
extern "C" long __cdecl _InterlockedCompareExchange(long volatile *Destination,
                                                    long Exchange,
                                                    long Comparand);
#pragma intrinsic(_InterlockedCompareExchange)

#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
                                               long long Value);
#pragma intrinsic(_InterlockedExchangeAdd64)
#endif

namespace __sanitizer {
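
// _ReadWriteBarrier is a compiler-level barrier: it stops the compiler from
// moving memory accesses across it but emits no machine instruction, which is
// exactly what a signal fence requires.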
inline void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}
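
// A thread fence must also order accesses at the CPU level; MFENCE is a full
// hardware memory barrier.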
inline void atomic_thread_fence(memory_order) {
  _mm_mfence();
}
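
// PAUSE is a spin-wait hint; it reduces power use and improves performance of
// busy-wait loops.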
inline void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}
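
// On x86/x86_64, naturally aligned loads up to the native word size are
// atomic and already carry acquire semantics, so it is enough to prevent
// compiler reordering (the signal fences below); hence the FIXME about
// 64-bit loads on 32-bit targets.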
template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}
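
// Likewise, aligned stores have release semantics on x86/x86_64; only
// seq_cst additionally needs a trailing hardware fence to order the store
// against later loads.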
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}
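
// The Interlocked* intrinsics used below act as full memory barriers and
// return the value the location held before the operation, so the explicit
// memory_order argument can be ignored.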
inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                      (long)v);
}

inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
                                         (long long)v);
#else
  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                       (long)v);
#endif
}
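
// There is no interlocked subtract intrinsic; fetch_sub adds the negation of
// v instead.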
inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                      -(long)v);
}

inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
    uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
                                         -(long long)v);
#else
  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                       -(long)v);
#endif
}
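
// _InterlockedExchange* atomically stores the new value and returns the old
// one, again with full-barrier semantics.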
inline u8 atomic_exchange(volatile atomic_uint8_t *a,
    u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}

inline u16 atomic_exchange(volatile atomic_uint16_t *a,
    u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
}

inline u32 atomic_exchange(volatile atomic_uint32_t *a,
    u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
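
// The compare-exchange family follows the usual contract: if *a equals *cmp,
// *a is set to xchg and true is returned; otherwise *cmp is updated with the
// value actually observed and false is returned.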
inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
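  // 64-bit builds use the intrinsic directly; 32-bit builds fall back to
  // inline assembly (LOCK CMPXCHG on the 8-bit operand).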
#ifdef _WIN64
  u8 prev = (u8)_InterlockedCompareExchange8(
      (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
#else
  u8 prev;
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchgv
    lock cmpxchg [ecx], dl
    mov prev, al
  }
#endif
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
                                           u16 *cmp,
                                           u16 xchg,
                                           memory_order mo) {
  u16 cmpv = *cmp;
  u16 prev = (u16)_InterlockedCompareExchange16(
      (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
                                           u32 *cmp,
                                           u32 xchg,
                                           memory_order mo) {
  u32 cmpv = *cmp;
  u32 prev = (u32)_InterlockedCompareExchange(
      (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
                                           u64 *cmp,
                                           u64 xchg,
                                           memory_order mo) {
  u64 cmpv = *cmp;
  u64 prev = (u64)_InterlockedCompareExchange64(
      (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}
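
// There is no spurious-failure (weak) CAS here, so the weak variant simply
// forwards to the strong one, which is a valid (if slightly stronger)
// implementation.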
template<typename T>
inline bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
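
// Illustrative usage only (not part of this header): a typical CAS retry loop
// built on the primitives above, setting a flag bit atomically. The names
// flags and kBit are hypothetical.
//
//   atomic_uint32_t flags;                     // hypothetical
//   const u32 kBit = 1u << 3;                  // hypothetical
//   u32 cur = atomic_load(&flags, memory_order_relaxed);
//   while (!(cur & kBit)) {
//     if (atomic_compare_exchange_weak(&flags, &cur, cur | kBit,
//                                      memory_order_acq_rel))
//       break;  // this thread set the bit
//     // on failure cur has been refreshed with the observed value
//   }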

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_MSVC_H