sctp_process_lock.h

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __sctp_process_lock_h__
#define __sctp_process_lock_h__
/*
 * We still need to define the following atomic functions, or
 * equivalents:
 * - atomic_add_int(&foo, val) - atomically add val to foo
 * - atomic_fetchadd_int(&foo, val) - same as atomic_add_int,
 *   but the previous value is returned
 * - atomic_subtract_int(&foo, val) - can be built from atomic_add_int()
 *
 * - atomic_cmpset_int(&foo, value, newvalue) - atomically sets foo to
 *   newvalue if and only if foo equals value. Returns non-zero
 *   on success.
 */
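/*
 * Editor's note: a minimal sketch of how the four primitives above could
 * be provided on a platform with C11 atomics. The mapping below is an
 * assumption for illustration (it takes _Atomic int pointers), not the
 * definitions this stack actually uses.
 */
#if 0	/* illustrative only, not compiled */
#include <stdatomic.h>

/* Atomically add val to *p, discarding the old value. */
#define atomic_add_int(p, val)      ((void)atomic_fetch_add((p), (val)))
/* Atomically add val to *p and return the previous value. */
#define atomic_fetchadd_int(p, val) atomic_fetch_add((p), (val))
/* Atomically subtract val from *p. */
#define atomic_subtract_int(p, val) ((void)atomic_fetch_sub((p), (val)))

/* Set *p to newval iff *p == oldval; returns non-zero on success. */
static inline int
atomic_cmpset_int(_Atomic int *p, int oldval, int newval)
{
	/* atomic_compare_exchange_strong() returns true on success. */
	return (atomic_compare_exchange_strong(p, &oldval, newval));
}
#endif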
#ifdef SCTP_PER_SOCKET_LOCKING
/*
 * Per-socket-level locking: a single lock at the socket layer serializes
 * everything, so the fine-grained locking macros below reduce to no-ops.
 */
#if defined(_WIN32)
/* Lock for INFO stuff */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#else
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#endif
/* Lock for INP */
#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)
#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)
#define SCTP_INP_READ_INIT(_inp)
#define SCTP_INP_READ_DESTROY(_inp)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_INP_READ_UNLOCK(_inp)
/* Lock for TCB */
#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_TCB_TRYLOCK(_tcb) 1
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
#else
/*
 * per tcb level locking
 */
#define SCTP_IPI_COUNT_INIT()
#if defined(_WIN32)
#define SCTP_WQ_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK_ASSERT()
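/*
 * SRW locks require Windows Vista or newer (WINVER >= 0x0600); on older
 * targets a plain CRITICAL_SECTION stands in for the read/write lock, so
 * readers exclude each other there as well.
 */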
#if WINVER < 0x0600
#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#else
#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeSRWLock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_INP_INFO_RLOCK() \
	AcquireSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	TryAcquireSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	AcquireSRWLockExclusive(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	ReleaseSRWLockShared(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	ReleaseSRWLockExclusive(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#endif
#define SCTP_IP_PKTLOG_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
/*
 * The INP locks are used for locking an SCTP endpoint. For example, if
 * we want to change something at the endpoint level, such as the
 * random_store or the cookie secrets, we take the INP lock.
 */
#define SCTP_INP_READ_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	EnterCriticalSection(&(_inp)->inp_mtx); \
} while (0)
#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	EnterCriticalSection(&(_inp)->inp_mtx); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_subtract_int(&((_inp)->refcount), 1)
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	EnterCriticalSection(&(_inp)->inp_create_mtx); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_create_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)
  224. * For the majority of things (once we have found the association) we will
  225. * lock the actual association mutex. This will protect all the assoiciation
  226. * level queues and streams and such. We will need to lock the socket layer
  227. * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
  228. * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
  229. */
#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	EnterCriticalSection(&(_tcb)->tcb_mtx); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_TRYLOCK(_tcb) (TryEnterCriticalSection(&(_tcb)->tcb_mtx))
#define SCTP_TCB_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
#else /* all Userspaces except Windows */
#define SCTP_WQ_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_WQ_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
#ifdef INVARIANTS
#define SCTP_WQ_ADDR_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s:%d: wq_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_WQ_ADDR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s:%d: wq_addr_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_WQ_ADDR_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
#endif
#define SCTP_WQ_ADDR_LOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s:%d: wq_addr_mtx not locked", __FILE__, __LINE__))
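/*
 * Editor's note: the assert idiom above relies on pthread_mutex_trylock()
 * returning EBUSY when the mutex is already held. If the mutex was in
 * fact unlocked, the trylock succeeds (and keeps the lock), but the
 * KASSERT then fires anyway, so the leaked lock does not matter in
 * practice.
 */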
#define SCTP_INP_INFO_LOCK_INIT() \
	(void)pthread_rwlock_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(rwlock_attr))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	(void)pthread_rwlock_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
#ifdef INVARIANTS
#define SCTP_INP_INFO_RLOCK() \
	KASSERT(pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_INFO_WLOCK() \
	KASSERT(pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_INFO_RUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_INFO_WUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s:%d: ipi_ep_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_INP_INFO_RLOCK() \
	(void)pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	(void)pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#endif
#define SCTP_INP_INFO_LOCK_ASSERT()
#define SCTP_INP_INFO_RLOCK_ASSERT()
#define SCTP_INP_INFO_WLOCK_ASSERT()
#define SCTP_INP_INFO_TRYLOCK() \
	(!(pthread_rwlock_tryrdlock(&SCTP_BASE_INFO(ipi_ep_mtx))))
#define SCTP_IP_PKTLOG_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IP_PKTLOG_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#ifdef INVARIANTS
#define SCTP_IP_PKTLOG_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s:%d: ipi_pktlog_mtx already locked", __FILE__, __LINE__))
#define SCTP_IP_PKTLOG_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s:%d: ipi_pktlog_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IP_PKTLOG_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#endif
/*
 * The INP locks are used for locking an SCTP endpoint. For example, if
 * we want to change something at the endpoint level, such as the
 * random_store or the cookie secrets, we take the INP lock. A sketch of
 * the pattern follows.
 */
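/*
 * Editor's note: a hypothetical sketch of the endpoint-level pattern the
 * comment above describes. The function and its body are illustrative
 * only; struct sctp_inpcb is the endpoint type used throughout this
 * stack.
 */
#if 0	/* illustrative only, not compiled */
static void
example_refresh_secret(struct sctp_inpcb *inp)
{
	SCTP_INP_WLOCK(inp);
	/* ... regenerate the cookie secret / random_store here ... */
	SCTP_INP_WUNLOCK(inp);
}
#endif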
#define SCTP_INP_READ_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_READ_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
#ifdef INVARIANTS
#define SCTP_INP_READ_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s:%d: inp_rdata_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_READ_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s:%d: inp_rdata_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_INP_READ_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
#endif
#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__)); \
} while (0)
#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__)); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__))
#define SCTP_INP_WLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_WUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx); \
} while (0)
#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_WLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s:%d: inp_mtx not locked", __FILE__, __LINE__))
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_subtract_int(&((_inp)->refcount), 1)
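/*
 * Editor's note: a sketch of the usual reference-count dance with the two
 * macros above -- take a reference while a lock pins the inp, drop the
 * lock to do work that must not hold inp_mtx, then release the
 * reference. Hypothetical function, for illustration only.
 */
#if 0	/* illustrative only, not compiled */
static void
example_hold_inp(struct sctp_inpcb *inp)
{
	SCTP_INP_RLOCK(inp);
	SCTP_INP_INCR_REF(inp);	/* inp cannot be freed while refcount > 0 */
	SCTP_INP_RUNLOCK(inp);

	/* ... work that must not hold inp_mtx ... */

	SCTP_INP_DECR_REF(inp);
}
#endif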
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx already locked", __FILE__, __LINE__)); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s:%d: inp_create_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
#endif
/*
 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This protects all the association-
 * level queues and streams and such. We still need to lock the socket
 * layer when we stuff data up into the receiving sb_mb. That is, we need
 * an extra SOCKBUF_LOCK(&so->so_rcv) even though the association is
 * locked; a sketch of that ordering follows.
 */
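/*
 * Editor's note: a sketch of the lock ordering described above -- the
 * association (TCB) lock is taken first, and the receive socket buffer
 * is locked separately while data is appended. Hypothetical function;
 * for illustration only.
 */
#if 0	/* illustrative only, not compiled */
static void
example_deliver(struct sctp_tcb *stcb, struct socket *so)
{
	SCTP_TCB_LOCK(stcb);
	/* ... update association-level queues and streams ... */
	SOCKBUF_LOCK(&so->so_rcv);
	/* ... append data to the receive buffer (sb_mb) ... */
	SOCKBUF_UNLOCK(&so->so_rcv);
	SCTP_TCB_UNLOCK(stcb);
}
#endif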
#define SCTP_TCB_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx already locked", __FILE__, __LINE__)); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx already locked", __FILE__, __LINE__))
#endif
#define SCTP_TCB_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s:%d: tcb_mtx not locked", __FILE__, __LINE__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_LOCK_ASSERT(_tcb) \
	KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s:%d: tcb_mtx not locked", __FILE__, __LINE__))
#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
#endif
#endif /* SCTP_PER_SOCKET_LOCKING */
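/*
 * Editor's note: SCTP_TCB_TRYLOCK() returns non-zero on success in every
 * branch above (it is hardwired to 1 under SCTP_PER_SOCKET_LOCKING), so
 * callers can use it for deadlock avoidance as sketched below.
 * Hypothetical function; for illustration only.
 */
#if 0	/* illustrative only, not compiled */
static int
example_try_tcb(struct sctp_tcb *stcb)
{
	if (!SCTP_TCB_TRYLOCK(stcb)) {
		return (0);	/* busy: caller retries or defers */
	}
	/* ... brief work under the TCB lock ... */
	SCTP_TCB_UNLOCK(stcb);
	return (1);
}
#endif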
/*
 * common locks
 */
/* copied over to compile */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
/* socket locks */
#if defined(_WIN32)
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#define SOCKBUF_LOCK(_so_buf) \
	EnterCriticalSection(&(_so_buf)->sb_mtx)
#define SOCKBUF_UNLOCK(_so_buf) \
	LeaveCriticalSection(&(_so_buf)->sb_mtx)
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#else
#define SOCKBUF_LOCK_ASSERT(_so_buf) \
	KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s:%d: socket buffer not locked", __FILE__, __LINE__))
#ifdef INVARIANTS
#define SOCKBUF_LOCK(_so_buf) \
	KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s:%d: sockbuf_mtx already locked", __FILE__, __LINE__))
#define SOCKBUF_UNLOCK(_so_buf) \
	KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s:%d: sockbuf_mtx not locked", __FILE__, __LINE__))
#else
#define SOCKBUF_LOCK(_so_buf) \
	pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
#define SOCKBUF_UNLOCK(_so_buf) \
	pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
#endif
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#endif
#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()
#if defined(_WIN32)
/* address list locks */
#if WINVER < 0x0600
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#else
#define SCTP_IPI_ADDR_INIT() \
	InitializeSRWLock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY()
#define SCTP_IPI_ADDR_RLOCK() \
	AcquireSRWLockShared(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	ReleaseSRWLockShared(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	AcquireSRWLockExclusive(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	ReleaseSRWLockExclusive(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#endif
/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#else
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_rwlock_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(rwlock_attr))
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_rwlock_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
#ifdef INVARIANTS
#define SCTP_IPI_ADDR_RLOCK() \
	KASSERT(pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_RUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx not locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_WLOCK() \
	KASSERT(pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ADDR_WUNLOCK() \
	KASSERT(pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s:%d: ipi_addr_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IPI_ADDR_RLOCK() \
	(void)pthread_rwlock_rdlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	(void)pthread_rwlock_wrlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_rwlock_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#endif
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
/* iterator locks */
#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s:%d: it_mtx already locked", __FILE__, __LINE__))
#define SCTP_ITERATOR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s:%d: it_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_ITERATOR_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
#endif
#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
#ifdef INVARIANTS
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s:%d: ipi_iterator_wq_mtx already locked", __FILE__, __LINE__))
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s:%d: ipi_iterator_wq_mtx not locked", __FILE__, __LINE__))
#else
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif
#endif
#define SCTP_INCR_EP_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)
#define SCTP_DECR_EP_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)
#define SCTP_INCR_ASOC_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)
#define SCTP_DECR_ASOC_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)
#define SCTP_INCR_LADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)
#define SCTP_DECR_LADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)
#define SCTP_INCR_RADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)
#define SCTP_DECR_RADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)
#define SCTP_INCR_CHK_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)
#define SCTP_DECR_CHK_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)
#define SCTP_INCR_READQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)
#define SCTP_DECR_READQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)
#define SCTP_INCR_STRMOQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
#define SCTP_DECR_STRMOQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)
#endif /* __sctp_process_lock_h__ */