/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO In the future, might want to not use i386 specific assembly.
 * The options include:
 *  - implement them generically (but maybe not truly atomic?) in userspace
 *  - have ifdef's for __Userspace_arch_ perhaps (OS isn't enough...)
 */

#include <stdio.h>
#include <sys/types.h>

#if defined(__APPLE__) || defined(_WIN32)
#if defined(_WIN32)
#define atomic_add_int(addr, val)        InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val)   InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val)   InterlockedExchangeAdd((LPLONG)addr, -((LONG)val))
#define atomic_cmpset_int(dst, exp, src) InterlockedCompareExchange((LPLONG)dst, src, exp)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
#define atomic_add_int(addr, val)        OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val)   OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val)   OSAtomicAdd32Barrier(-val, (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif
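
/*
 * Note: both SCTP_DECREMENT_AND_CHECK_REFCOUNT() variants above answer
 * "did this decrement drop the counter to zero?", but through different
 * return conventions: InterlockedExchangeAdd() returns the value *before*
 * the add (hence the == 1 test), while OSAtomicAdd32Barrier() returns the
 * value *after* it (hence == 0).  Also, InterlockedCompareExchange()
 * returns the previous value of *dst rather than a boolean, so the Windows
 * atomic_cmpset_int() mapping does not strictly follow the "0 on failure,
 * non-zero on success" contract documented for the gcc path below.
 *
 * Illustrative usage sketch (hypothetical object and free routine, not part
 * of this header):
 *
 *     if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&obj->refcount)) {
 *         free_object(obj);  // last reference dropped
 *     }
 */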

#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
    int32_t newval; \
    newval = atomic_fetchadd_int(addr, -val); \
    if (newval < 0) { \
        panic("Counter goes negative"); \
    } \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
    int32_t newval; \
    newval = atomic_fetchadd_int(addr, -val); \
    if (newval < 0) { \
        *addr = 0; \
    } \
}
#endif

#if defined(_WIN32)
static void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif
#else
/* Using gcc built-in functions for atomic memory operations
   Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
   Requires gcc version 4.1.0
   compile with -march=i486
 */

/* Atomically add V to *P. */
#define atomic_add_int(P, V)      (void) __sync_fetch_and_add(P, V)
/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)

/* Following explanation from src/sys/i386/include/atomic.h,
 * for atomic compare and set
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */
#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)

#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
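
/*
 * Since atomic_cmpset_int() only reports success or failure, callers that
 * need a read-modify-write other than add/subtract typically retry in a
 * loop.  Minimal sketch (hypothetical helper, not part of this header):
 *
 *     static inline void
 *     atomic_set_max_int(volatile u_int *p, u_int v)
 *     {
 *         u_int cur;
 *
 *         do {
 *             cur = *p;
 *             if (cur >= v)
 *                 return;               // already at least v, nothing to do
 *         } while (atomic_cmpset_int(p, cur, v) == 0);  // retry on CAS failure
 *     }
 */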

#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
    int32_t oldval; \
    oldval = atomic_fetchadd_int(addr, -val); \
    if (oldval < val) { \
        panic("Counter goes negative"); \
    } \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
    int32_t oldval; \
    oldval = atomic_fetchadd_int(addr, -val); \
    if (oldval < val) { \
        *addr = 0; \
    } \
}
#endif

static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif
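
/*
 * Taken together, the active branches above expose a small FreeBSD-style
 * atomic API to the userspace SCTP code.  Minimal usage sketch (hypothetical
 * counter and length, not part of this header):
 *
 *     static volatile u_int queued_bytes;
 *
 *     atomic_add_int(&queued_bytes, chunk_length);
 *     ...
 *     // Decrement with an underflow guard: panics under INVARIANTS,
 *     // otherwise clamps the counter to zero.
 *     SCTP_SAVE_ATOMIC_DECREMENT(&queued_bytes, chunk_length);
 */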

#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add incr to *P, and return the original value of *P. */
#define atomic_add_int(P, V)      AO_fetch_and_add((AO_t *)P, V)
#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t *)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t *)p, v)

/* Atomically compare *addr to old_val, and replace *addr by new_val
   if the first comparison succeeds.  Returns nonzero if the comparison
   succeeded and *addr was updated.
 */
/* Following explanation from src/sys/i386/include/atomic.h, which
 * matches that of AO_compare_and_swap above.
 * Atomic compare and set, used by the mutex functions
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */
#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t *)dst, exp, src)

static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic */

#if 0 /* using atomic_mtx */
#include <pthread.h>

extern userland_mutex_t atomic_mtx;

#if defined(_WIN32)
static inline void atomic_init() {
    InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy() {
    DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock() {
    EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock() {
    LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init() {
    pthread_mutexattr_t mutex_attr;

    pthread_mutexattr_init(&mutex_attr);
#ifdef INVARIANTS
    pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_ERRORCHECK);
#endif
    pthread_mutex_init(&atomic_mtx, &mutex_attr);
    pthread_mutexattr_destroy(&mutex_attr);
}
static inline void atomic_destroy() {
    (void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock() {
#ifdef INVARIANTS
    KASSERT(pthread_mutex_lock(&atomic_mtx) == 0, ("atomic_lock: atomic_mtx already locked"));
#else
    (void)pthread_mutex_lock(&atomic_mtx);
#endif
}
static inline void atomic_unlock() {
#ifdef INVARIANTS
    KASSERT(pthread_mutex_unlock(&atomic_mtx) == 0, ("atomic_unlock: atomic_mtx not locked"));
#else
    (void)pthread_mutex_unlock(&atomic_mtx);
#endif
}
#endif

/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */
#define MPLOCKED "lock ; "

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
    int *p = (int *) n;

    atomic_lock();
    __asm __volatile(
        " " MPLOCKED " "
        " xaddl %0, %1 ; "
        "# atomic_fetchadd_int"
        : "+r" (v),   /* 0 (result) */
          "=m" (*p)   /* 1 */
        : "m" (*p));  /* 2 */
    atomic_unlock();
    return (v);
}
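
/*
 * Note on the inline assembly above: "xaddl %0, %1" exchanges the register
 * holding v with *p and stores the sum back in *p, so v returns holding the
 * previous value of *p, matching the documented fetch-and-add semantics.
 */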

#ifdef CPU_DISABLE_CMPXCHG
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
    u_char res;

    atomic_lock();
    __asm __volatile(
        " pushfl ; "
        " cli ; "
        " cmpl %3,%4 ; "
        " jne 1f ; "
        " movl %2,%1 ; "
        "1: "
        " sete %0 ; "
        " popfl ; "
        "# atomic_cmpset_int"
        : "=q" (res),  /* 0 */
          "=m" (*dst)  /* 1 */
        : "r" (src),   /* 2 */
          "r" (exp),   /* 3 */
          "m" (*dst)   /* 4 */
        : "memory");
    atomic_unlock();
    return (res);
}
#else /* !CPU_DISABLE_CMPXCHG */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
    u_char res;

    atomic_lock();
    __asm __volatile(
        " " MPLOCKED " "
        " cmpxchgl %2,%1 ; "
        " sete %0 ; "
        "1: "
        "# atomic_cmpset_int"
        : "=a" (res),  /* 0 */
          "=m" (*dst)  /* 1 */
        : "r" (src),   /* 2 */
          "a" (exp),   /* 3 */
          "m" (*dst)   /* 4 */
        : "memory");
    atomic_unlock();
    return (res);
}
#endif /* CPU_DISABLE_CMPXCHG */

#define atomic_add_int(P, V) do { \
    atomic_lock(); \
    (*(u_int *)(P) += (V)); \
    atomic_unlock(); \
} while (0)

#define atomic_subtract_int(P, V) do { \
    atomic_lock(); \
    (*(u_int *)(P) -= (V)); \
    atomic_unlock(); \
} while (0)

#endif
#endif /* _USER_ATOMIC_H_ */