/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
 * We are initializing two zones for Mbufs and Clusters.
 */

#include <stdio.h>
#include <stdlib.h>	/* malloc()/free(), used by m_tag_alloc()/m_tag_free_default() below */
#include <string.h>
/* #include <sys/param.h> This defines MSIZE 256 */
#if !defined(SCTP_SIMPLE_ALLOCATOR)
#include "umem.h"
#endif
#include "user_mbuf.h"
#include "user_environment.h"
#include "user_atomic.h"
#include "netinet/sctp_pcb.h"

#define KIPC_MAX_LINKHDR	4	/* int: max length of link header (see sys/sysctl.h) */
#define KIPC_MAX_PROTOHDR	5	/* int: max length of network header (see sys/sysctl.h) */
int max_linkhdr = KIPC_MAX_LINKHDR;
int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
  50. /*
  51. * Zones from which we allocate.
  52. */
  53. sctp_zone_t zone_mbuf;
  54. sctp_zone_t zone_clust;
  55. sctp_zone_t zone_ext_refcnt;
  56. /* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
  57. * and mb_dtor_clust.
  58. * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
  59. * struct mbuf * clust_mb_args; does not work.
  60. */
  61. struct clust_args clust_mb_args;
  62. /* __Userspace__
  63. * Local prototypes.
  64. */
  65. static int mb_ctor_mbuf(void *, void *, int);
  66. static int mb_ctor_clust(void *, void *, int);
  67. static void mb_dtor_mbuf(void *, void *);
  68. static void mb_dtor_clust(void *, void *);
  69. /***************** Functions taken from user_mbuf.h *************/
  70. static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
  71. {
  72. int flags = pkthdr;
  73. m->m_next = NULL;
  74. m->m_nextpkt = NULL;
  75. m->m_len = 0;
  76. m->m_flags = flags;
  77. m->m_type = type;
  78. if (flags & M_PKTHDR) {
  79. m->m_data = m->m_pktdat;
  80. m->m_pkthdr.rcvif = NULL;
  81. m->m_pkthdr.len = 0;
  82. m->m_pkthdr.header = NULL;
  83. m->m_pkthdr.csum_flags = 0;
  84. m->m_pkthdr.csum_data = 0;
  85. m->m_pkthdr.tso_segsz = 0;
  86. m->m_pkthdr.ether_vtag = 0;
  87. SLIST_INIT(&m->m_pkthdr.tags);
  88. } else
  89. m->m_data = m->m_dat;
  90. return (0);
  91. }
/* __Userspace__ */
struct mbuf *
m_get(int how, short type)
{
	struct mbuf *mret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following initialization is not yet enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
	 * thoroughly tested. See the comment there.
	 */
	mbuf_mb_args.flags = 0;
	mbuf_mb_args.type = type;
#endif
	/* The mbuf master zone, zone_mbuf, has already been
	 * created in mbuf_initialize(). */
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
#endif
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/

	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is not
	 * applied. If that is the case, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before it is returned.
	 */
	if (mret) {
#if USING_MBUF_CONSTRUCTOR
		if (!(mret->m_type == type)) {
			mbuf_constructor_dup(mret, 0, type);
		}
#else
		mbuf_constructor_dup(mret, 0, type);
#endif
	}
	return mret;
}
/* __Userspace__ */
struct mbuf *
m_gethdr(int how, short type)
{
	struct mbuf *mret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following initialization is not yet enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
	 * thoroughly tested. See the comment there.
	 */
	mbuf_mb_args.flags = M_PKTHDR;
	mbuf_mb_args.type = type;
#endif
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
#endif
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/

	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is not
	 * applied. If that is the case, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before it is returned.
	 */
	if (mret) {
#if USING_MBUF_CONSTRUCTOR
		if (!((mret->m_flags & M_PKTHDR) && (mret->m_type == type))) {
			mbuf_constructor_dup(mret, M_PKTHDR, type);
		}
#else
		mbuf_constructor_dup(mret, M_PKTHDR, type);
#endif
	}
	return mret;
}
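
/*
 * Example (editor's sketch, not compiled into the library): the typical
 * allocation round trip through the two getters above. M_NOWAIT and
 * MT_DATA are assumed to come from user_mbuf.h, as used elsewhere in
 * this file.
 */
#if 0
static void
example_mbuf_alloc(void)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);	/* mbuf with packet header */
	if (m == NULL)
		return;
	m->m_len = 0;				/* no data yet */
	m->m_pkthdr.len = 0;
	m_freem(m);	/* m_freem() (defined below) frees the whole chain */
}
#endif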
/* __Userspace__ */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

	if (m->m_flags & M_EXT)
		mb_free_ext(m);
	else if ((m->m_flags & M_NOFREE) == 0) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
		mb_dtor_mbuf(m, NULL);
#endif
		SCTP_ZONE_FREE(zone_mbuf, m);
	}
	/*umem_cache_free(zone_mbuf, m);*/
	return (n);
}

static void
clust_constructor_dup(caddr_t m_clust, struct mbuf *m)
{
	u_int *refcnt;
	int type, size;

	if (m == NULL) {
		return;
	}
	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	type = EXT_CLUSTER;
	size = MCLBYTES;

	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
#if !defined(SCTP_SIMPLE_ALLOCATOR)
	if (refcnt == NULL) {
		umem_reap();
		refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
		/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	}
#endif
	*refcnt = 1;
	m->m_ext.ext_buf = (caddr_t)m_clust;
	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ext_size = size;
	m->m_ext.ext_type = type;
	m->m_ext.ref_cnt = refcnt;
	return;
}
/* __Userspace__ */
void
m_clget(struct mbuf *m, int how)
{
	caddr_t mclust_ret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct clust_args clust_mb_args_l;
#endif
	if (m->m_flags & M_EXT) {
		SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
	}
	m->m_ext.ext_buf = (char *)NULL;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	clust_mb_args_l.parent_mbuf = m;
#endif
	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0);
#endif
	/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
	/*
	 * On a cluster allocation failure, call umem_reap() and retry.
	 */
	if (mclust_ret == NULL) {
#if !defined(SCTP_SIMPLE_ALLOCATOR)
		umem_reap();
		mclust_ret = SCTP_ZONE_GET(zone_clust, char);
		/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
#endif
		if (mclust_ret == NULL) {
			SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
		}
	}
#if USING_MBUF_CONSTRUCTOR
	if (m->m_ext.ext_buf == NULL) {
		clust_constructor_dup(mclust_ret, m);
	}
#else
	clust_constructor_dup(mclust_ret, m);
#endif
}
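
/*
 * Example (editor's sketch, not compiled): attaching cluster storage to a
 * freshly allocated mbuf with m_clget(). On success, M_EXT is set and
 * m_data points at the MCLBYTES-sized external buffer.
 */
#if 0
static struct mbuf *
example_cluster_mbuf(void)
{
	struct mbuf *m;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_clget(m, M_NOWAIT);
	if (m->m_ext.ext_buf == NULL) {	/* cluster allocation failed */
		m_freem(m);
		return (NULL);
	}
	memset(mtod(m, caddr_t), 0, MCLBYTES);
	m->m_len = MCLBYTES;
	return (m);
}
#endif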
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;
	int size, mbuf_threshold, space_needed = len;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL) {
		flags &= ~M_PKTHDR;
	}

	if (allonebuf == 0)
		mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
	else
		mbuf_threshold = 1;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if ((!allonebuf && len >= MCLBYTES) || (len > (int)(((mbuf_threshold - 1) * MLEN) + MHLEN))) {
			mb = m_gethdr(how, type);
			if (mb != NULL) {	/* don't let MCLGET dereference a failed allocation */
				MCLGET(mb, how);
			}
			size = MCLBYTES;
			/* SCTP_BUF_LEN(mb) = MCLBYTES; */
		} else if (flags & M_PKTHDR) {
			mb = m_gethdr(how, type);
			if (len < MHLEN) {
				size = len;
			} else {
				size = MHLEN;
			}
		} else {
			mb = m_get(how, type);
			if (len < MLEN) {
				size = len;
			} else {
				size = MLEN;
			}
		}

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		if (allonebuf != 0 && size < space_needed) {
			m_freem(mb);
			return (NULL);
		}

		/* Book keeping. */
		len -= size;
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR) {
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */
	}

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else {
		m = nm;
	}

	return (m);
}
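
/*
 * Example (editor's sketch, not compiled): m_getm2() sizes the chain for
 * us. With allonebuf != 0 the payload must fit in a single mbuf or
 * cluster, which is how the SCTP code obtains contiguous buffers.
 */
#if 0
static struct mbuf *
example_chain_for(int nbytes)
{
	/* NULL: start a new chain; M_PKTHDR: first mbuf gets a header. */
	return (m_getm2(NULL, nbytes, M_NOWAIT, MT_DATA, M_PKTHDR, 0));
}
#endif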
/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;
	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr. We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);
	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, (int)max(total + align, 1), how, MT_DATA, flags, 0);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = (int)min(M_TRAILINGSPACE(mb), total - progress);
		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

struct mbuf *
m_last(struct mbuf *m)
{
	while (m->m_next) {
		m = m->m_next;
	}
	return (m);
}

/*
 * Unlink a tag from the list of tags associated with an mbuf.
 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{
	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/*
 * Reclaim resources associated with a tag.
 */
static __inline void
m_tag_free(struct m_tag *t)
{
	(*t->m_tag_free)(t);
}

/*
 * Set up the contents of a tag. Note that this does not fill in the free
 * method; the caller is expected to do that.
 *
 * XXX probably should be called m_tag_init, but that was already taken.
 */
static __inline void
m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len)
{
	t->m_tag_id = type;
	t->m_tag_len = len;
	t->m_tag_cookie = cookie;
}
/************ End functions from user_mbuf.h ******************/

/************ End functions to substitute umem_cache_alloc and umem_cache_free **************/

void
mbuf_initialize(void *dummy)
{
	/*
	 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
	 * (TODO: m_getcl() - using packet secondary zone).
	 * There is no provision for trash_init and trash_fini in umem.
	 */
	/* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                                 mb_ctor_mbuf, mb_dtor_mbuf, NULL,
	                                 &mbuf_mb_args,
	                                 NULL, 0);
	   zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
#else
	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,
	                              NULL,
	                              NULL, 0);
#endif
	/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
	                                      NULL, NULL, NULL,
	                                      NULL,
	                                      NULL, 0);*/
	SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);
	/*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	                                 mb_ctor_clust, mb_dtor_clust, NULL,
	                                 &clust_mb_args,
	                                 NULL, 0);
	  zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
#else
	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	                               mb_ctor_clust, mb_dtor_clust, NULL,
	                               &clust_mb_args,
	                               NULL, 0);
#endif
	/* uma_prealloc() goes here... */

	/* __Userspace__ Add umem_reap here for low memory situation?
	 */
}
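
/*
 * Usage note (editor's sketch): mbuf_initialize() must run exactly once,
 * before the first m_get()/m_clget() call, e.g. during stack bring-up;
 * the dummy argument is unused.
 */
#if 0
	mbuf_initialize(NULL);
#endif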
/*
 * __Userspace__
 *
 * Constructor for Mbuf master zone. We have a different constructor
 * for allocating the cluster.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API. See user_mbuf.h.
 *
 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
 * was passed when umem_cache_alloc was called.
 * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
 * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines.
 * The advantage of using UMEM_NOFAIL is that we don't have to check whether
 * umem_cache_alloc was successful or not; the failure handler takes care of it.
 *
 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc
 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
 * It also mentions that umem_nofail_callback is Evolving.
 */
static int
mb_ctor_mbuf(void *mem, void *arg, int flgs)
{
#if USING_MBUF_CONSTRUCTOR
	struct mbuf *m;
	struct mb_args *args;
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	} else
		m->m_data = m->m_dat;
#endif
	return (0);
}
/*
 * __Userspace__
 * The Mbuf master zone destructor.
 * This would be called in response to umem_cache_destroy.
 * TODO: Recheck if this is what we want to do in this destructor.
 * (Note: the number of times mb_dtor_mbuf is called is equal to the
 * number of individual mbufs allocated from zone_mbuf.)
 */
static void
mb_dtor_mbuf(void *mem, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0) {
		m_tag_delete_chain(m, NULL);
	}
}
/* __Userspace__
 * The Cluster zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for. If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it. See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, void *arg, int flgs)
{
#if USING_MBUF_CONSTRUCTOR
	struct mbuf *m;
	struct clust_args *cla;
	u_int *refcnt;
	int type, size;
	sctp_zone_t zone;

	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	type = EXT_CLUSTER;
	zone = zone_clust;
	size = MCLBYTES;

	cla = (struct clust_args *)arg;
	m = cla->parent_mbuf;

	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	*refcnt = 1;

	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_args = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}
#endif
	return (0);
}

/* __Userspace__ */
static void
mb_dtor_clust(void *mem, void *arg)
{
	/* mem is of type caddr_t. In sys/types.h we have typedef char *caddr_t; */
	/* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times
	 * mb_dtor_clust is called is equal to the number of individual mbufs allocated
	 * from zone_clust. Similarly for mb_dtor_mbuf).
	 * At this point the following:
	 *	struct mbuf *m;
	 *	m = (struct mbuf *)arg;
	 *	assert(*(m->m_ext.ref_cnt) == 0);
	 * is not meaningful, since m->m_ext.ref_cnt = NULL has been done in mb_free_ext().
	 */
}
/* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
	m_tag_unlink(m, t);
	m_tag_free(t);
}

/* Unlink and free a packet tag chain, starting from given tag. */
void
m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
{
	struct m_tag *p, *q;

	KASSERT(m, ("m_tag_delete_chain: null mbuf"));
	if (t != NULL)
		p = t;
	else
		p = SLIST_FIRST(&m->m_pkthdr.tags);
	if (p == NULL)
		return;
	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
		m_tag_delete(m, q);
	m_tag_delete(m, p);
}

#if 0
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	SCTP_DEBUG_USR(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
	for (; m; m = m->m_next) {
		SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
		if (m->m_flags & M_EXT)
			SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
	}
}
#endif
/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = m_free(mb);
}

/*
 * __Userspace__
 * clean mbufs with M_EXT storage attached to them
 * if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	int skipmbuf;

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

	/*
	 * Check if the header is embedded in the cluster.
	 */
	skipmbuf = (m->m_flags & M_NOFREE);

	/* Free the external attached storage if this
	 * mbuf is the only reference to it.
	 * __Userspace__ TODO: jumbo frames
	 */
	/* NOTE: We had the same code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
	 * reduces to here before, but the IPHONE malloc commit changed this
	 * to compare to 0 instead of 1 (see next line). Why?
	 * ... this caused a huge memory leak in Linux.
	 */
#ifdef IPHONE
	if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
#else
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
#endif
	{
		if (m->m_ext.ext_type == EXT_CLUSTER) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
			mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
#endif
			SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
			SCTP_ZONE_FREE(zone_ext_refcnt, (u_int *)m->m_ext.ref_cnt);
			m->m_ext.ref_cnt = NULL;
		}
	}

	if (skipmbuf)
		return;

	/* __Userspace__ Also freeing the storage for ref_cnt.
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_dtor_mbuf(m, NULL);
#endif
	SCTP_ZONE_FREE(zone_mbuf, m);
	/*umem_cache_free(zone_mbuf, m);*/
}
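
/*
 * Example (editor's sketch, not compiled): two mbufs sharing one cluster.
 * m_copym() (below) bumps *ref_cnt instead of copying the data, and
 * mb_free_ext() only returns the cluster to zone_clust when the last
 * reference is dropped.
 */
#if 0
static void
example_shared_cluster(struct mbuf *m)	/* assumes m has M_EXT set */
{
	struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);	/* *ref_cnt becomes 2 */
	if (copy != NULL)
		m_freem(copy);		/* back to 1; cluster survives */
	m_freem(m);			/* last reference; cluster freed */
}
#endif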
/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len). Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len));
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void)m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
static struct mbuf *
m_dup1(struct mbuf *m, int off, int len, int wait)
{
	struct mbuf *n = NULL;
	int copyhdr;

	if (len > MCLBYTES)
		return NULL;
	if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
		copyhdr = 1;
	else
		copyhdr = 0;
	if (len >= MINCLSIZE) {
		/* Allocate the mbuf before attaching the cluster; the
		 * original code passed the still-NULL n to m_clget().
		 * TODO: include code for copying the header.
		 */
		if (copyhdr == 1)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		if (n != NULL)
			m_clget(n, wait);
	} else {
		if (copyhdr == 1)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
	}
	if (!n)
		return NULL;	/* ENOBUFS */
	if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
		m_free(n);
		return NULL;
	}
	m_copydata(m, off, len, mtod(n, caddr_t));
	n->m_len = len;
	return n;
}
/* Taken from sys/kern/uipc_mbuf2.c */
struct mbuf *
m_pulldown(struct mbuf *m, int off, int len, int *offp)
{
	struct mbuf *n, *o;
	int hlen, tlen, olen;
	int writable;

	/* Check invalid arguments. */
	KASSERT(m, ("m == NULL in m_pulldown()"));
	if (len > MCLBYTES) {
		m_freem(m);
		return NULL;	/* impossible */
	}

#ifdef PULLDOWN_DEBUG
	{
		struct mbuf *t;

		SCTP_DEBUG_USR(SCTP_DEBUG_USR, "before:");
		for (t = m; t; t = t->m_next)
			SCTP_DEBUG_USR(SCTP_DEBUG_USR, " %d", t->m_len);
		SCTP_DEBUG_USR(SCTP_DEBUG_USR, "\n");
	}
#endif
	n = m;
	while (n != NULL && off > 0) {
		if (n->m_len > off)
			break;
		off -= n->m_len;
		n = n->m_next;
	}
	/* Be sure to point at a non-empty mbuf. */
	while (n != NULL && n->m_len == 0)
		n = n->m_next;
	if (!n) {
		m_freem(m);
		return NULL;	/* mbuf chain too short */
	}

	writable = 0;
	if ((n->m_flags & M_EXT) == 0 ||
	    (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
		writable = 1;

	/*
	 * The target data is on <n, off>.
	 * If we got enough data on the mbuf "n", we're done.
	 */
	if ((off == 0 || offp) && len <= n->m_len - off && writable)
		goto ok;

	/*
	 * When len <= n->m_len - off and off != 0, it is a special case.
	 * len bytes from <n, off> sit in a single mbuf, but the caller does
	 * not like the starting position (off).
	 * Chop the current mbuf into two pieces, set off to 0.
	 */
	if (len <= n->m_len - off) {
		o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
		if (o == NULL) {
			m_freem(m);
			return NULL;	/* ENOBUFS */
		}
		n->m_len = off;
		o->m_next = n->m_next;
		n->m_next = o;
		n = n->m_next;
		off = 0;
		goto ok;
	}

	/*
	 * We need to take hlen from <n, off> and tlen from <n->m_next, 0>,
	 * and construct a contiguous mbuf with m_len == len.
	 * Note that hlen + tlen == len, and tlen > 0.
	 */
	hlen = n->m_len - off;
	tlen = len - hlen;

	/*
	 * Ensure that we have enough trailing data on the mbuf chain.
	 * If not, we can do nothing about the chain.
	 */
	olen = 0;
	for (o = n->m_next; o != NULL; o = o->m_next)
		olen += o->m_len;
	if (hlen + olen < len) {
		m_freem(m);
		return NULL;	/* mbuf chain too short */
	}

	/*
	 * Easy cases first.
	 * We need to use m_copydata() to get data from <n->m_next, 0>.
	 */
	if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) {
		m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
		n->m_len += tlen;
		m_adj(n->m_next, tlen);
		goto ok;
	}

	if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) {
		n->m_next->m_data -= hlen;
		n->m_next->m_len += hlen;
		memcpy(mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off, hlen);
		n->m_len -= hlen;
		n = n->m_next;
		off = 0;
		goto ok;
	}

	/*
	 * Now we need to do it the hard way. Don't m_copy as there's no room
	 * on either end.
	 */
	if (len > MLEN) {
		/* Allocate the mbuf before attaching the cluster; the
		 * original code passed the still-uninitialized o to m_clget().
		 */
		o = m_get(M_NOWAIT, m->m_type);
		if (o != NULL)
			m_clget(o, M_NOWAIT);
		/* o = m_getcl(M_NOWAIT, m->m_type, 0);*/
	} else
		o = m_get(M_NOWAIT, m->m_type);
	if (!o) {
		m_freem(m);
		return NULL;	/* ENOBUFS */
	}
	/* get hlen from <n, off> into <o, 0> */
	o->m_len = hlen;
	memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen);
	n->m_len -= hlen;
	/* get tlen from <n->m_next, 0> into <o, hlen> */
	m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
	o->m_len += tlen;
	m_adj(n->m_next, tlen);
	o->m_next = n->m_next;
	n->m_next = o;
	n = o;
	off = 0;
ok:
#ifdef PULLDOWN_DEBUG
	{
		struct mbuf *t;

		SCTP_DEBUG_USR(SCTP_DEBUG_USR, "after:");
		for (t = m; t; t = t->m_next)
			SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
		SCTP_DEBUG_USR(SCTP_DEBUG_USR, " (off=%d)\n", off);
	}
#endif
	if (offp)
		*offp = off;
	return n;
}
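
/*
 * Example (editor's sketch, not compiled): using m_pulldown() to make a
 * protocol header contiguous. struct sctp_chunkhdr is just an illustrative
 * choice of header type.
 */
#if 0
static struct sctp_chunkhdr *
example_pull_chunkhdr(struct mbuf *m, int off)
{
	struct mbuf *n;
	int newoff;

	n = m_pulldown(m, off, (int)sizeof(struct sctp_chunkhdr), &newoff);
	if (n == NULL)
		return (NULL);	/* m_pulldown() freed the chain on failure */
	return ((struct sctp_chunkhdr *)(mtod(n, caddr_t) + newoff));
}
#endif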
/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_args = m->m_ext.ext_args;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
}
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	KASSERT(m != NULL, ("m_copym, m is NULL"));

#if !defined(INVARIANTS)
	if (m == NULL) {
		return (NULL);
	}
#endif
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
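
/*
 * Example (editor's sketch, not compiled): a partial, read-only copy.
 * Cluster-backed data is shared via mb_dupcl(), so the copy must not be
 * written to unless M_WRITABLE() allows it.
 */
#if 0
	/* Copy bytes 20..59 of the chain rooted at m: */
	struct mbuf *part = m_copym(m, 20, 40, M_NOWAIT);
#endif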
int
m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
{
	struct m_tag *p, *t, *tprev = NULL;

	KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
	m_tag_delete_chain(to, NULL);
	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
		t = m_tag_copy(p, how);
		if (t == NULL) {
			m_tag_delete_chain(to, NULL);
			return 0;
		}
		if (tprev == NULL)
			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
		else
			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
		tprev = t;
	}
	return 1;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{
	KASSERT(to, ("m_dup_pkthdr: to is NULL"));
	KASSERT(from, ("m_dup_pkthdr: from is NULL"));
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}

/* Copy a single tag. */
struct m_tag *
m_tag_copy(struct m_tag *t, int how)
{
	struct m_tag *p;

	KASSERT(t, ("m_tag_copy: null tag"));
	p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
	if (p == NULL)
		return (NULL);
	memcpy(p + 1, t + 1, t->m_tag_len);	/* Copy the data */
	return p;
}

/* Get a packet tag structure along with specified data following. */
struct m_tag *
m_tag_alloc(uint32_t cookie, int type, int len, int wait)
{
	struct m_tag *t;

	if (len < 0)
		return NULL;
	t = malloc(len + sizeof(struct m_tag));
	if (t == NULL)
		return NULL;
	m_tag_setup(t, cookie, type, len);
	t->m_tag_free = m_tag_free_default;
	return t;
}

/* Free a packet tag. */
void
m_tag_free_default(struct m_tag *t)
{
	free(t);
}
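
/*
 * Example (editor's sketch, not compiled): the packet-tag lifecycle with
 * the helpers above. The cookie and type values are made up for
 * illustration; the tag payload starts right after the m_tag header.
 */
#if 0
static int
example_tag(struct mbuf *m)	/* assumes m has M_PKTHDR set */
{
	struct m_tag *t;

	t = m_tag_alloc(0x12345678, 1, sizeof(uint32_t), M_NOWAIT);
	if (t == NULL)
		return (-1);
	*(uint32_t *)(t + 1) = 42;
	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
	m_tag_delete_chain(m, NULL);	/* unlinks and frees every tag */
	return (0);
}
#endif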
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			memset(mtod(n, caddr_t), 0, MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
        int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			MH_ALIGN(m, len);
	} else {
		if (len < MLEN)
			M_ALIGN(m, len);
	}
	m->m_len = len;
	return (m);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		memcpy(cp, mtod(m, caddr_t) + off, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
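
/*
 * Example (editor's sketch, not compiled): flattening a chain into a flat
 * buffer with m_copydata(); m_length() (defined above) bounds the copy.
 */
#if 0
static void
example_flatten(struct mbuf *m, char *buf, u_int buflen)
{
	u_int total = m_length(m, NULL);

	if (total > buflen)
		total = buflen;
	m_copydata(m, 0, (int)total, buf);
}
#endif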
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail. Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return. Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
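
/*
 * Example (editor's sketch, not compiled): the sign of req_len selects the
 * end that m_adj() trims.
 */
#if 0
	m_adj(m, 12);	/* positive: drop 12 bytes from the head */
	m_adj(m, -4);	/* negative: drop 4 bytes from the tail */
#endif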
/* m_split is used within sctp_handle_cookie_echo. */

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes. In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	/* MBUF_CHECKSLEEP(wait); */
	for (m = m0; m && (int)len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void)m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
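
/*
 * Example (editor's sketch, not compiled): splitting a chain after 100
 * bytes, as sctp_handle_cookie_echo does with the cookie. The tail may
 * share clusters with the head, so check M_WRITABLE() before modifying.
 */
#if 0
static void
example_split(struct mbuf *m0)
{
	struct mbuf *tail;

	tail = m_split(m0, 100, M_NOWAIT);	/* m0 keeps the first 100 bytes */
	if (tail != NULL)
		m_freem(tail);
}
#endif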
int
pack_send_buffer(caddr_t buffer, struct mbuf *mb)
{
	int count_to_copy;
	int total_count_copied = 0;
	int offset = 0;

	do {
		count_to_copy = mb->m_len;
		memcpy(buffer + offset, mtod(mb, caddr_t), count_to_copy);
		offset += count_to_copy;
		total_count_copied += count_to_copy;
		mb = mb->m_next;
	} while (mb);

	return (total_count_copied);
}