sctp_indata.c 183 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
7577857795780578157825783578457855786578757885789579057915792579357945795579657975798
  1. /*-
  2. * SPDX-License-Identifier: BSD-3-Clause
  3. *
  4. * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  5. * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  6. * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions are met:
  10. *
  11. * a) Redistributions of source code must retain the above copyright notice,
  12. * this list of conditions and the following disclaimer.
  13. *
  14. * b) Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in
  16. * the documentation and/or other materials provided with the distribution.
  17. *
  18. * c) Neither the name of Cisco Systems, Inc. nor the names of its
  19. * contributors may be used to endorse or promote products derived
  20. * from this software without specific prior written permission.
  21. *
  22. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  23. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  24. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  26. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  27. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  28. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  29. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  30. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  31. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  32. * THE POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. #if defined(__FreeBSD__) && !defined(__Userspace__)
  35. #include <sys/cdefs.h>
  36. __FBSDID("$FreeBSD$");
  37. #endif
  38. #include <netinet/sctp_os.h>
  39. #if defined(__FreeBSD__) && !defined(__Userspace__)
  40. #include <sys/proc.h>
  41. #endif
  42. #include <netinet/sctp_var.h>
  43. #include <netinet/sctp_sysctl.h>
  44. #include <netinet/sctp_header.h>
  45. #include <netinet/sctp_pcb.h>
  46. #include <netinet/sctputil.h>
  47. #include <netinet/sctp_output.h>
  48. #include <netinet/sctp_uio.h>
  49. #include <netinet/sctp_auth.h>
  50. #include <netinet/sctp_timer.h>
  51. #include <netinet/sctp_asconf.h>
  52. #include <netinet/sctp_indata.h>
  53. #include <netinet/sctp_bsd_addr.h>
  54. #include <netinet/sctp_input.h>
  55. #include <netinet/sctp_crc32.h>
  56. #if defined(__FreeBSD__) && !defined(__Userspace__)
  57. #include <netinet/sctp_lock_bsd.h>
  58. #endif
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it ... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
  68. static uint32_t
  69. sctp_add_chk_to_control(struct sctp_queued_to_read *control,
  70. struct sctp_stream_in *strm,
  71. struct sctp_tcb *stcb,
  72. struct sctp_association *asoc,
  73. struct sctp_tmit_chunk *chk, int hold_rlock);
/*
 * Recompute and cache the association's receive window (my_rwnd) from
 * the current socket-buffer and queue state via sctp_calc_rwnd().
 */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
  79. /* Calculate what the rwnd would be */
  80. uint32_t
  81. sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
  82. {
  83. uint32_t calc = 0;
  84. /*
  85. * This is really set wrong with respect to a 1-2-m socket. Since
  86. * the sb_cc is the count that everyone as put up. When we re-write
  87. * sctp_soreceive then we will fix this so that ONLY this
  88. * associations data is taken into account.
  89. */
  90. if (stcb->sctp_socket == NULL) {
  91. return (calc);
  92. }
  93. KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
  94. ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
  95. KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
  96. ("size_on_all_streams is %u", asoc->size_on_all_streams));
  97. if (stcb->asoc.sb_cc == 0 &&
  98. asoc->cnt_on_reasm_queue == 0 &&
  99. asoc->cnt_on_all_streams == 0) {
  100. /* Full rwnd granted */
  101. calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
  102. return (calc);
  103. }
  104. /* get actual space */
  105. calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
  106. /*
  107. * take out what has NOT been put on socket queue and we yet hold
  108. * for putting up.
  109. */
  110. calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
  111. asoc->cnt_on_reasm_queue * MSIZE));
  112. calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
  113. asoc->cnt_on_all_streams * MSIZE));
  114. if (calc == 0) {
  115. /* out of space */
  116. return (calc);
  117. }
  118. /* what is the overhead of all these rwnd's */
  119. calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
  120. /* If the window gets too small due to ctrl-stuff, reduce it
  121. * to 1, even it is 0. SWS engaged
  122. */
  123. if (calc < stcb->asoc.my_rwnd_control_len) {
  124. calc = 1;
  125. }
  126. return (calc);
  127. }
  128. /*
  129. * Build out our readq entry based on the incoming packet.
  130. */
  131. struct sctp_queued_to_read *
  132. sctp_build_readq_entry(struct sctp_tcb *stcb,
  133. struct sctp_nets *net,
  134. uint32_t tsn, uint32_t ppid,
  135. uint32_t context, uint16_t sid,
  136. uint32_t mid, uint8_t flags,
  137. struct mbuf *dm)
  138. {
  139. struct sctp_queued_to_read *read_queue_e = NULL;
  140. sctp_alloc_a_readq(stcb, read_queue_e);
  141. if (read_queue_e == NULL) {
  142. goto failed_build;
  143. }
  144. memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
  145. read_queue_e->sinfo_stream = sid;
  146. read_queue_e->sinfo_flags = (flags << 8);
  147. read_queue_e->sinfo_ppid = ppid;
  148. read_queue_e->sinfo_context = context;
  149. read_queue_e->sinfo_tsn = tsn;
  150. read_queue_e->sinfo_cumtsn = tsn;
  151. read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
  152. read_queue_e->mid = mid;
  153. read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
  154. TAILQ_INIT(&read_queue_e->reasm);
  155. read_queue_e->whoFrom = net;
  156. atomic_add_int(&net->ref_count, 1);
  157. read_queue_e->data = dm;
  158. read_queue_e->stcb = stcb;
  159. read_queue_e->port_from = stcb->rport;
  160. if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
  161. read_queue_e->do_not_ref_stcb = 1;
  162. }
  163. failed_build:
  164. return (read_queue_e);
  165. }
  166. struct mbuf *
  167. sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
  168. {
  169. struct sctp_extrcvinfo *seinfo;
  170. struct sctp_sndrcvinfo *outinfo;
  171. struct sctp_rcvinfo *rcvinfo;
  172. struct sctp_nxtinfo *nxtinfo;
  173. #if defined(_WIN32)
  174. WSACMSGHDR *cmh;
  175. #else
  176. struct cmsghdr *cmh;
  177. #endif
  178. struct mbuf *ret;
  179. int len;
  180. int use_extended;
  181. int provide_nxt;
  182. if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
  183. sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
  184. sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
  185. /* user does not want any ancillary data */
  186. return (NULL);
  187. }
  188. len = 0;
  189. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
  190. len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
  191. }
  192. seinfo = (struct sctp_extrcvinfo *)sinfo;
  193. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
  194. (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
  195. provide_nxt = 1;
  196. len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
  197. } else {
  198. provide_nxt = 0;
  199. }
  200. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
  201. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
  202. use_extended = 1;
  203. len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
  204. } else {
  205. use_extended = 0;
  206. len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
  207. }
  208. } else {
  209. use_extended = 0;
  210. }
  211. ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
  212. if (ret == NULL) {
  213. /* No space */
  214. return (ret);
  215. }
  216. SCTP_BUF_LEN(ret) = 0;
  217. /* We need a CMSG header followed by the struct */
  218. #if defined(_WIN32)
  219. cmh = mtod(ret, WSACMSGHDR *);
  220. #else
  221. cmh = mtod(ret, struct cmsghdr *);
  222. #endif
  223. /*
  224. * Make sure that there is no un-initialized padding between
  225. * the cmsg header and cmsg data and after the cmsg data.
  226. */
  227. memset(cmh, 0, len);
  228. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
  229. cmh->cmsg_level = IPPROTO_SCTP;
  230. cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
  231. cmh->cmsg_type = SCTP_RCVINFO;
  232. rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
  233. rcvinfo->rcv_sid = sinfo->sinfo_stream;
  234. rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
  235. rcvinfo->rcv_flags = sinfo->sinfo_flags;
  236. rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
  237. rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
  238. rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
  239. rcvinfo->rcv_context = sinfo->sinfo_context;
  240. rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
  241. #if defined(_WIN32)
  242. cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
  243. #else
  244. cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
  245. #endif
  246. SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
  247. }
  248. if (provide_nxt) {
  249. cmh->cmsg_level = IPPROTO_SCTP;
  250. cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
  251. cmh->cmsg_type = SCTP_NXTINFO;
  252. nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
  253. nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
  254. nxtinfo->nxt_flags = 0;
  255. if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
  256. nxtinfo->nxt_flags |= SCTP_UNORDERED;
  257. }
  258. if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
  259. nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
  260. }
  261. if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
  262. nxtinfo->nxt_flags |= SCTP_COMPLETE;
  263. }
  264. nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
  265. nxtinfo->nxt_length = seinfo->serinfo_next_length;
  266. nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
  267. #if defined(_WIN32)
  268. cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
  269. #else
  270. cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
  271. #endif
  272. SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
  273. }
  274. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
  275. cmh->cmsg_level = IPPROTO_SCTP;
  276. outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
  277. if (use_extended) {
  278. cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
  279. cmh->cmsg_type = SCTP_EXTRCV;
  280. memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
  281. SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
  282. } else {
  283. cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
  284. cmh->cmsg_type = SCTP_SNDRCV;
  285. *outinfo = *sinfo;
  286. SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
  287. }
  288. }
  289. return (ret);
  290. }
/*
 * Move TSN 'tsn' from the renegable mapping array over to the
 * non-renegable (nr) mapping array, so that a later sctp_drain()
 * cannot revoke a chunk that has already been handed to the ULP.
 * Also maintains the highest-TSN bookkeeping for both maps.
 */
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int in_r, in_nr;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		/* Draining is disabled, so nothing can ever be revoked. */
		return;
	}
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/*
		 * This tsn is behind the cum ack and thus we don't
		 * need to worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	/* A TSN above the cum-ack must be recorded in at least one map. */
	KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
	if (!in_nr) {
		/* Mark it non-renegable and track the nr high-water TSN. */
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
	}
	if (in_r) {
		/* Clear it from the renegable map. */
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
		if (tsn == asoc->highest_tsn_inside_map) {
			/* We must back down to see what the new highest is. */
			for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
				SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
					asoc->highest_tsn_inside_map = i;
					break;
				}
			}
			/* Loop variable 'i' is examined after the scan on purpose. */
			if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
				/* The renegable map is now empty. */
				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
			}
		}
	}
}
/*
 * Insert 'control' into the proper per-stream inbound queue (ordered or
 * unordered), keeping the queue sorted by message ID (MID, or SSN when
 * I-DATA is not in use).  Returns 0 on success and -1 on a protocol
 * violation (duplicate MID, or a second unordered message when old-style
 * DATA allows only one); the caller aborts the association on -1.
 */
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	/* The upper byte of sinfo_flags carries the DATA chunk flags. */
	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one stream can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		/* Complete (unfragmented) message: no reassembly needed. */
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		/* Walk the sorted queue to find the insertion point. */
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the
				 * new one, insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, He sent me a duplicate msg
				 * id number?? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert
					 * it after this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
  414. static void
  415. sctp_abort_in_reasm(struct sctp_tcb *stcb,
  416. struct sctp_queued_to_read *control,
  417. struct sctp_tmit_chunk *chk,
  418. int *abort_flag, int opspot)
  419. {
  420. char msg[SCTP_DIAG_INFO_LEN];
  421. struct mbuf *oper;
  422. if (stcb->asoc.idata_supported) {
  423. SCTP_SNPRINTF(msg, sizeof(msg),
  424. "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
  425. opspot,
  426. control->fsn_included,
  427. chk->rec.data.tsn,
  428. chk->rec.data.sid,
  429. chk->rec.data.fsn, chk->rec.data.mid);
  430. } else {
  431. SCTP_SNPRINTF(msg, sizeof(msg),
  432. "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
  433. opspot,
  434. control->fsn_included,
  435. chk->rec.data.tsn,
  436. chk->rec.data.sid,
  437. chk->rec.data.fsn,
  438. (uint16_t)chk->rec.data.mid);
  439. }
  440. oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  441. sctp_m_freem(chk->data);
  442. chk->data = NULL;
  443. sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  444. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
  445. sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
  446. *abort_flag = 1;
  447. }
  448. static void
  449. sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
  450. {
  451. /*
  452. * The control could not be placed and must be cleaned.
  453. */
  454. struct sctp_tmit_chunk *chk, *nchk;
  455. TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
  456. TAILQ_REMOVE(&control->reasm, chk, sctp_next);
  457. if (chk->data)
  458. sctp_m_freem(chk->data);
  459. chk->data = NULL;
  460. sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  461. }
  462. sctp_free_remote_addr(control->whoFrom);
  463. if (control->data) {
  464. sctp_m_freem(control->data);
  465. control->data = NULL;
  466. }
  467. sctp_free_a_readq(stcb, control);
  468. }
  469. /*
  470. * Queue the chunk either right into the socket buffer if it is the next one
  471. * to go OR put it in the correct place in the delivery queue. If we do
  472. * append to the so_buf, keep doing so until we are out of order as
  473. * long as the control's entered are non-fragmented.
  474. */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN.
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided an will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer
	 *
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	/* Account for it on the stream queues until proven deliverable. */
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		/*
		 * On Apple the socket lock must be taken before the TCB
		 * lock, so drop and re-acquire in the required order while
		 * holding a refcount so the TCB cannot go away.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it wont be queued if it could be delivered directly */
		queue_needed = 0;
		/* Undo the provisional stream-queue accounting from above. */
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		/* Deliver any queued messages that are now in order. */
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					/* An entry on the ordered inqueue must be marked ordered. */
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				/* Next in order but fragmented: needs reassembly. */
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
/*
 * Recompute control->length and control->tail_mbuf by walking the mbuf
 * chain hanging off control->data, freeing zero-length mbufs in place.
 * If the control is already on the socket read queue, socket-buffer
 * accounting is charged for each mbuf kept (caller holds any SB locks).
 */
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				/* Unlink and free; sctp_m_free returns the next mbuf. */
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the
			 * SB stuff, we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		/* 'prev' is the last non-empty mbuf seen. */
		control->tail_mbuf = prev;
	}
}
  681. static void
  682. sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
  683. {
  684. struct mbuf *prev=NULL;
  685. struct sctp_tcb *stcb;
  686. stcb = control->stcb;
  687. if (stcb == NULL) {
  688. #ifdef INVARIANTS
  689. panic("Control broken");
  690. #else
  691. return;
  692. #endif
  693. }
  694. if (control->tail_mbuf == NULL) {
  695. /* TSNH */
  696. sctp_m_freem(control->data);
  697. control->data = m;
  698. sctp_setup_tail_pointer(control);
  699. return;
  700. }
  701. control->tail_mbuf->m_next = m;
  702. while (m) {
  703. if (SCTP_BUF_LEN(m) == 0) {
  704. /* Skip mbufs with NO length */
  705. if (prev == NULL) {
  706. /* First one */
  707. control->tail_mbuf->m_next = sctp_m_free(m);
  708. m = control->tail_mbuf->m_next;
  709. } else {
  710. SCTP_BUF_NEXT(prev) = sctp_m_free(m);
  711. m = SCTP_BUF_NEXT(prev);
  712. }
  713. if (m == NULL) {
  714. control->tail_mbuf = prev;
  715. }
  716. continue;
  717. }
  718. prev = m;
  719. if (control->on_read_q) {
  720. /*
  721. * On read queue so we must increment the
  722. * SB stuff, we assume caller has done any locks of SB.
  723. */
  724. sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
  725. }
  726. *added += SCTP_BUF_LEN(m);
  727. atomic_add_int(&control->length, SCTP_BUF_LEN(m));
  728. m = SCTP_BUF_NEXT(m);
  729. }
  730. if (prev) {
  731. control->tail_mbuf = prev;
  732. }
  733. }
  734. static void
  735. sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
  736. {
  737. memset(nc, 0, sizeof(struct sctp_queued_to_read));
  738. nc->sinfo_stream = control->sinfo_stream;
  739. nc->mid = control->mid;
  740. TAILQ_INIT(&nc->reasm);
  741. nc->top_fsn = control->top_fsn;
  742. nc->mid = control->mid;
  743. nc->sinfo_flags = control->sinfo_flags;
  744. nc->sinfo_ppid = control->sinfo_ppid;
  745. nc->sinfo_context = control->sinfo_context;
  746. nc->fsn_included = 0xffffffff;
  747. nc->sinfo_tsn = control->sinfo_tsn;
  748. nc->sinfo_cumtsn = control->sinfo_cumtsn;
  749. nc->sinfo_assoc_id = control->sinfo_assoc_id;
  750. nc->whoFrom = control->whoFrom;
  751. atomic_add_int(&nc->whoFrom->ref_count, 1);
  752. nc->stcb = control->stcb;
  753. nc->port_from = control->port_from;
  754. nc->do_not_ref_stcb = control->do_not_ref_stcb;
  755. }
  756. static void
  757. sctp_reset_a_control(struct sctp_queued_to_read *control,
  758. struct sctp_inpcb *inp, uint32_t tsn)
  759. {
  760. control->fsn_included = tsn;
  761. if (control->on_read_q) {
  762. /*
  763. * We have to purge it from there,
  764. * hopefully this will work :-)
  765. */
  766. TAILQ_REMOVE(&inp->read_queue, control, next);
  767. control->on_read_q = 0;
  768. }
  769. }
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/* Special handling for the old un-ordered data chunk.
	 * All the chunks/TSN's go to mid 0. So
	 * we have to do the old style watching to see
	 * if we have it all. If you return one, no other
	 * control entries on the un-ordered queue will
	 * be looked at. In theory there should be no others
	 * entries in reality, unless the guy is sending both
	 * unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	/* Next fragment sequence number we can merge onto the control. */
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			/* Pre-allocate a spare control in case leftovers must move. */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left on
					 * the control queue to a new control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						/* Seed the new control with the leftover first fragment. */
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Now lets add it to the queue after removing control */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					/* Partial delivery is over for this message. */
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						/* Spare control was never queued; release it. */
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				/* Message not complete yet; spare control unused. */
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	/* Start partial delivery if enough data accumulated. */
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
/*
 * Place 'chk' into 'control' for old-style (non I-DATA) unordered
 * reassembly, where the FSN is the TSN itself.  A FIRST fragment may
 * become the control's data directly (possibly swapping with a
 * previously-seen FIRST from a later message); otherwise the chunk is
 * inserted FSN-sorted into control->reasm.  Sets *abort_flag on a
 * protocol violation.
 */
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is
			 * a smaller TSN than this one, even though
			 * this has a first, it must be from a subsequent
			 * msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassembly on
			 * one control multiple messages. As long
			 * as the next FIRST is greater then the old
			 * first (TSN i.e. FSN wise)
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/* Easy way the start of a new guy beyond the lowest */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does
				 * we started the pd-api on the higher TSN (since
				 * the equals part is a TSN failure it must be that).
				 *
				 * We are completely hosed in that case since I have
				 * no way to recover. This really will only happen
				 * if we can get more TSN's higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got
			 * is smaller than the one we previously placed.. yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			/* The displaced (larger) FIRST is re-queued below. */
			goto place_chunk;
		}
		/* First FIRST seen: the chunk's data becomes the control's data. */
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one, insert
			 * the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This
			 * really should not happen since the FSN is
			 * a TSN and it should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
  1044. static int
  1045. sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
  1046. struct sctp_stream_in *strm, int inp_read_lock_held)
  1047. {
  1048. /*
  1049. * Given a stream, strm, see if any of
  1050. * the SSN's on it that are fragmented
  1051. * are ready to deliver. If so go ahead
  1052. * and place them on the read queue. In
  1053. * so placing if we have hit the end, then
  1054. * we need to remove them from the stream's queue.
  1055. */
  1056. struct sctp_queued_to_read *control, *nctl = NULL;
  1057. uint32_t next_to_del;
  1058. uint32_t pd_point;
  1059. int ret = 0;
  1060. if (stcb->sctp_socket) {
  1061. pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
  1062. stcb->sctp_ep->partial_delivery_point);
  1063. } else {
  1064. pd_point = stcb->sctp_ep->partial_delivery_point;
  1065. }
  1066. control = TAILQ_FIRST(&strm->uno_inqueue);
  1067. if ((control != NULL) &&
  1068. (asoc->idata_supported == 0)) {
  1069. /* Special handling needed for "old" data format */
  1070. if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
  1071. goto done_un;
  1072. }
  1073. }
  1074. if (strm->pd_api_started) {
  1075. /* Can't add more */
  1076. return (0);
  1077. }
  1078. while (control) {
  1079. SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
  1080. control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
  1081. nctl = TAILQ_NEXT(control, next_instrm);
  1082. if (control->end_added) {
  1083. /* We just put the last bit on */
  1084. if (control->on_strm_q) {
  1085. #ifdef INVARIANTS
  1086. if (control->on_strm_q != SCTP_ON_UNORDERED) {
  1087. panic("Huh control: %p on_q: %d -- not unordered?",
  1088. control, control->on_strm_q);
  1089. }
  1090. #endif
  1091. SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
  1092. TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
  1093. if (asoc->size_on_all_streams >= control->length) {
  1094. asoc->size_on_all_streams -= control->length;
  1095. } else {
  1096. #ifdef INVARIANTS
  1097. panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
  1098. #else
  1099. asoc->size_on_all_streams = 0;
  1100. #endif
  1101. }
  1102. sctp_ucount_decr(asoc->cnt_on_all_streams);
  1103. control->on_strm_q = 0;
  1104. }
  1105. if (control->on_read_q == 0) {
  1106. sctp_add_to_readq(stcb->sctp_ep, stcb,
  1107. control,
  1108. &stcb->sctp_socket->so_rcv, control->end_added,
  1109. inp_read_lock_held, SCTP_SO_NOT_LOCKED);
  1110. }
  1111. } else {
  1112. /* Can we do a PD-API for this un-ordered guy? */
  1113. if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
  1114. strm->pd_api_started = 1;
  1115. control->pdapi_started = 1;
  1116. sctp_add_to_readq(stcb->sctp_ep, stcb,
  1117. control,
  1118. &stcb->sctp_socket->so_rcv, control->end_added,
  1119. inp_read_lock_held, SCTP_SO_NOT_LOCKED);
  1120. break;
  1121. }
  1122. }
  1123. control = nctl;
  1124. }
  1125. done_un:
  1126. control = TAILQ_FIRST(&strm->inqueue);
  1127. if (strm->pd_api_started) {
  1128. /* Can't add more */
  1129. return (0);
  1130. }
  1131. if (control == NULL) {
  1132. return (ret);
  1133. }
  1134. if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
  1135. /* Ok the guy at the top was being partially delivered
  1136. * completed, so we remove it. Note
  1137. * the pd_api flag was taken off when the
  1138. * chunk was merged on in sctp_queue_data_for_reasm below.
  1139. */
  1140. nctl = TAILQ_NEXT(control, next_instrm);
  1141. SCTPDBG(SCTP_DEBUG_XXX,
  1142. "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
  1143. control, control->end_added, control->mid,
  1144. control->top_fsn, control->fsn_included,
  1145. strm->last_mid_delivered);
  1146. if (control->end_added) {
  1147. if (control->on_strm_q) {
  1148. #ifdef INVARIANTS
  1149. if (control->on_strm_q != SCTP_ON_ORDERED) {
  1150. panic("Huh control: %p on_q: %d -- not ordered?",
  1151. control, control->on_strm_q);
  1152. }
  1153. #endif
  1154. SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
  1155. TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
  1156. if (asoc->size_on_all_streams >= control->length) {
  1157. asoc->size_on_all_streams -= control->length;
  1158. } else {
  1159. #ifdef INVARIANTS
  1160. panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
  1161. #else
  1162. asoc->size_on_all_streams = 0;
  1163. #endif
  1164. }
  1165. sctp_ucount_decr(asoc->cnt_on_all_streams);
  1166. control->on_strm_q = 0;
  1167. }
  1168. if (strm->pd_api_started && control->pdapi_started) {
  1169. control->pdapi_started = 0;
  1170. strm->pd_api_started = 0;
  1171. }
  1172. if (control->on_read_q == 0) {
  1173. sctp_add_to_readq(stcb->sctp_ep, stcb,
  1174. control,
  1175. &stcb->sctp_socket->so_rcv, control->end_added,
  1176. inp_read_lock_held, SCTP_SO_NOT_LOCKED);
  1177. }
  1178. control = nctl;
  1179. }
  1180. }
  1181. if (strm->pd_api_started) {
  1182. /* Can't add more must have gotten an un-ordered above being partially delivered. */
  1183. return (0);
  1184. }
  1185. deliver_more:
  1186. next_to_del = strm->last_mid_delivered + 1;
  1187. if (control) {
  1188. SCTPDBG(SCTP_DEBUG_XXX,
  1189. "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
  1190. control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
  1191. next_to_del);
  1192. nctl = TAILQ_NEXT(control, next_instrm);
  1193. if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
  1194. (control->first_frag_seen)) {
  1195. int done;
  1196. /* Ok we can deliver it onto the stream. */
  1197. if (control->end_added) {
  1198. /* We are done with it afterwards */
  1199. if (control->on_strm_q) {
  1200. #ifdef INVARIANTS
  1201. if (control->on_strm_q != SCTP_ON_ORDERED) {
  1202. panic("Huh control: %p on_q: %d -- not ordered?",
  1203. control, control->on_strm_q);
  1204. }
  1205. #endif
  1206. SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
  1207. TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
  1208. if (asoc->size_on_all_streams >= control->length) {
  1209. asoc->size_on_all_streams -= control->length;
  1210. } else {
  1211. #ifdef INVARIANTS
  1212. panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
  1213. #else
  1214. asoc->size_on_all_streams = 0;
  1215. #endif
  1216. }
  1217. sctp_ucount_decr(asoc->cnt_on_all_streams);
  1218. control->on_strm_q = 0;
  1219. }
  1220. ret++;
  1221. }
  1222. if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
  1223. /* A singleton now slipping through - mark it non-revokable too */
  1224. sctp_mark_non_revokable(asoc, control->sinfo_tsn);
  1225. } else if (control->end_added == 0) {
  1226. /* Check if we can defer adding until its all there */
  1227. if ((control->length < pd_point) || (strm->pd_api_started)) {
  1228. /* Don't need it or cannot add more (one being delivered that way) */
  1229. goto out;
  1230. }
  1231. }
  1232. done = (control->end_added) && (control->last_frag_seen);
  1233. if (control->on_read_q == 0) {
  1234. if (!done) {
  1235. if (asoc->size_on_all_streams >= control->length) {
  1236. asoc->size_on_all_streams -= control->length;
  1237. } else {
  1238. #ifdef INVARIANTS
  1239. panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
  1240. #else
  1241. asoc->size_on_all_streams = 0;
  1242. #endif
  1243. }
  1244. strm->pd_api_started = 1;
  1245. control->pdapi_started = 1;
  1246. }
  1247. sctp_add_to_readq(stcb->sctp_ep, stcb,
  1248. control,
  1249. &stcb->sctp_socket->so_rcv, control->end_added,
  1250. inp_read_lock_held, SCTP_SO_NOT_LOCKED);
  1251. }
  1252. strm->last_mid_delivered = next_to_del;
  1253. if (done) {
  1254. control = nctl;
  1255. goto deliver_more;
  1256. }
  1257. }
  1258. }
  1259. out:
  1260. return (ret);
  1261. }
/*
 * Merge the data of chunk "chk" onto the tail of "control"'s reassembled
 * message and release the chunk.  Returns the number of bytes appended to
 * an already-existing mbuf chain (0 when the chunk's data becomes the
 * control's first data).
 *
 * If the control is already on the socket read queue (partial delivery in
 * progress) the INP read lock is required; it is acquired here when the
 * caller does not already hold it (hold_rlock == 0).
 */
uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * Its being pd-api'd so we must
		 * do some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		/* No data yet: the chunk's mbuf chain becomes the message. */
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		/* Append; "added" receives the byte count appended. */
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	/* Chunk leaves the reassembly queue: fix queue accounting. */
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* First fragment carries the message's TSN and PPID. */
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			/*
			 * Message is complete and already being read:
			 * finish any partial-delivery state and take it
			 * off the stream queue it sits on.
			 */
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement size_on_all_streams,
				 * since control is on the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		/*
		 * Old-style unordered data is not counted per-stream;
		 * everything else bumps the all-streams message count.
		 */
		if ((unordered == 0) || (asoc->idata_supported)) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
			/* Ok we created this control and now
			 * lets validate that its legal i.e. there
			 * is a B bit set, if not and we have
			 * up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		/* Old DATA, unordered: handled by its own special path. */
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reasembly portion:
	 * o if its the first it goes to the control mbuf.
	 * o if its not first but the next in sequence it goes to the control,
	 *   and each succeeding one in order also goes.
	 * o if its not in order we place it on the list in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either
			 * sent us two data chunks with FIRST,
			 * or they sent two un-ordered chunks that
			 * were fragmented at the same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		/* The first fragment seeds the control's message data. */
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
				/* The LAST fragment must carry the highest FSN. */
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is not at top_fsn: %u -- abort\n",
					    chk->rec.data.fsn,
					    control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know that
				 * the first fragment is 0. For old DATA we have
				 * to receive the first before we know the first FSN
				 * (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this so its a dup */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate not beyond top FSN if we have seen last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the
		 * new chunk in the reassembly for this
		 * control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		/* Walk the FSN-ordered reassembly list to find the slot. */
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
					/* Last not at the end? huh? */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "Last fragment not last in list: -- abort\n");
					sctp_abort_in_reasm(stcb, control,
					    chk, abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
					return;
				}
				/*
				 * This one in queue is bigger than the new one, insert
				 * the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/* Gak, He sent me a duplicate str seq number */
				/*
				 * foo bar, I guess I will just free this new guy,
				 * should we abort too? FIX ME MAYBE? Or it COULD be
				 * that the SSN's have wrapped. Maybe I should
				 * compare to TSN somehow... sigh for now just blow
				 * away the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control
	 * structure that are in seq if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been
	 * seen there is no sense in looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the size-on-all-streams
					 * if its not on the read q. The read q
					 * flag will cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					/* Message now complete; close out partial delivery. */
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				/* Gap remains; stop pulling fragments. */
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
  1627. static struct sctp_queued_to_read *
  1628. sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
  1629. {
  1630. struct sctp_queued_to_read *control;
  1631. if (ordered) {
  1632. TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
  1633. if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
  1634. break;
  1635. }
  1636. }
  1637. } else {
  1638. if (idata_supported) {
  1639. TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
  1640. if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
  1641. break;
  1642. }
  1643. }
  1644. } else {
  1645. control = TAILQ_FIRST(&strm->uno_inqueue);
  1646. }
  1647. }
  1648. return (control);
  1649. }
  1650. static int
  1651. sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
  1652. struct mbuf **m, int offset, int chk_length,
  1653. struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
  1654. int *break_flag, int last_chunk, uint8_t chk_type)
  1655. {
  1656. struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
  1657. struct sctp_stream_in *strm;
  1658. uint32_t tsn, fsn, gap, mid;
  1659. struct mbuf *dmbuf;
  1660. int the_len;
  1661. int need_reasm_check = 0;
  1662. uint16_t sid;
  1663. struct mbuf *op_err;
  1664. char msg[SCTP_DIAG_INFO_LEN];
  1665. struct sctp_queued_to_read *control, *ncontrol;
  1666. uint32_t ppid;
  1667. uint8_t chk_flags;
  1668. struct sctp_stream_reset_list *liste;
  1669. int ordered;
  1670. size_t clen;
  1671. int created_control = 0;
  1672. if (chk_type == SCTP_IDATA) {
  1673. struct sctp_idata_chunk *chunk, chunk_buf;
  1674. chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
  1675. sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
  1676. chk_flags = chunk->ch.chunk_flags;
  1677. clen = sizeof(struct sctp_idata_chunk);
  1678. tsn = ntohl(chunk->dp.tsn);
  1679. sid = ntohs(chunk->dp.sid);
  1680. mid = ntohl(chunk->dp.mid);
  1681. if (chk_flags & SCTP_DATA_FIRST_FRAG) {
  1682. fsn = 0;
  1683. ppid = chunk->dp.ppid_fsn.ppid;
  1684. } else {
  1685. fsn = ntohl(chunk->dp.ppid_fsn.fsn);
  1686. ppid = 0xffffffff; /* Use as an invalid value. */
  1687. }
  1688. } else {
  1689. struct sctp_data_chunk *chunk, chunk_buf;
  1690. chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
  1691. sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
  1692. chk_flags = chunk->ch.chunk_flags;
  1693. clen = sizeof(struct sctp_data_chunk);
  1694. tsn = ntohl(chunk->dp.tsn);
  1695. sid = ntohs(chunk->dp.sid);
  1696. mid = (uint32_t)(ntohs(chunk->dp.ssn));
  1697. fsn = tsn;
  1698. ppid = chunk->dp.ppid;
  1699. }
  1700. if ((size_t)chk_length == clen) {
  1701. /*
  1702. * Need to send an abort since we had a
  1703. * empty data chunk.
  1704. */
  1705. op_err = sctp_generate_no_user_data_cause(tsn);
  1706. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
  1707. sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  1708. *abort_flag = 1;
  1709. return (0);
  1710. }
  1711. if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
  1712. asoc->send_sack = 1;
  1713. }
  1714. ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
  1715. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
  1716. sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
  1717. }
  1718. if (stcb == NULL) {
  1719. return (0);
  1720. }
  1721. SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
  1722. if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
  1723. /* It is a duplicate */
  1724. SCTP_STAT_INCR(sctps_recvdupdata);
  1725. if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
  1726. /* Record a dup for the next outbound sack */
  1727. asoc->dup_tsns[asoc->numduptsns] = tsn;
  1728. asoc->numduptsns++;
  1729. }
  1730. asoc->send_sack = 1;
  1731. return (0);
  1732. }
  1733. /* Calculate the number of TSN's between the base and this TSN */
  1734. SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
  1735. if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
  1736. /* Can't hold the bit in the mapping at max array, toss it */
  1737. return (0);
  1738. }
  1739. if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
  1740. SCTP_TCB_LOCK_ASSERT(stcb);
  1741. if (sctp_expand_mapping_array(asoc, gap)) {
  1742. /* Can't expand, drop it */
  1743. return (0);
  1744. }
  1745. }
  1746. if (SCTP_TSN_GT(tsn, *high_tsn)) {
  1747. *high_tsn = tsn;
  1748. }
  1749. /* See if we have received this one already */
  1750. if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
  1751. SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
  1752. SCTP_STAT_INCR(sctps_recvdupdata);
  1753. if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
  1754. /* Record a dup for the next outbound sack */
  1755. asoc->dup_tsns[asoc->numduptsns] = tsn;
  1756. asoc->numduptsns++;
  1757. }
  1758. asoc->send_sack = 1;
  1759. return (0);
  1760. }
  1761. /*
  1762. * Check to see about the GONE flag, duplicates would cause a sack
  1763. * to be sent up above
  1764. */
  1765. if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  1766. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
  1767. (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
  1768. /*
  1769. * wait a minute, this guy is gone, there is no longer a
  1770. * receiver. Send peer an ABORT!
  1771. */
  1772. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  1773. sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  1774. *abort_flag = 1;
  1775. return (0);
  1776. }
  1777. /*
  1778. * Now before going further we see if there is room. If NOT then we
  1779. * MAY let one through only IF this TSN is the one we are waiting
  1780. * for on a partial delivery API.
  1781. */
  1782. /* Is the stream valid? */
  1783. if (sid >= asoc->streamincnt) {
  1784. struct sctp_error_invalid_stream *cause;
  1785. op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
  1786. 0, M_NOWAIT, 1, MT_DATA);
  1787. if (op_err != NULL) {
  1788. /* add some space up front so prepend will work well */
  1789. SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
  1790. cause = mtod(op_err, struct sctp_error_invalid_stream *);
  1791. /*
  1792. * Error causes are just param's and this one has
  1793. * two back to back phdr, one with the error type
  1794. * and size, the other with the streamid and a rsvd
  1795. */
  1796. SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
  1797. cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
  1798. cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
  1799. cause->stream_id = htons(sid);
  1800. cause->reserved = htons(0);
  1801. sctp_queue_op_err(stcb, op_err);
  1802. }
  1803. SCTP_STAT_INCR(sctps_badsid);
  1804. SCTP_TCB_LOCK_ASSERT(stcb);
  1805. SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
  1806. if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
  1807. asoc->highest_tsn_inside_nr_map = tsn;
  1808. }
  1809. if (tsn == (asoc->cumulative_tsn + 1)) {
  1810. /* Update cum-ack */
  1811. asoc->cumulative_tsn = tsn;
  1812. }
  1813. return (0);
  1814. }
  1815. /*
  1816. * If its a fragmented message, lets see if we can
  1817. * find the control on the reassembly queues.
  1818. */
  1819. if ((chk_type == SCTP_IDATA) &&
  1820. ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
  1821. (fsn == 0)) {
  1822. /*
  1823. * The first *must* be fsn 0, and other
  1824. * (middle/end) pieces can *not* be fsn 0.
  1825. * XXX: This can happen in case of a wrap around.
  1826. * Ignore is for now.
  1827. */
  1828. SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
  1829. goto err_out;
  1830. }
  1831. control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
  1832. SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
  1833. chk_flags, control);
  1834. if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
  1835. /* See if we can find the re-assembly entity */
  1836. if (control != NULL) {
  1837. /* We found something, does it belong? */
  1838. if (ordered && (mid != control->mid)) {
  1839. SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
  1840. err_out:
  1841. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  1842. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
  1843. sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  1844. *abort_flag = 1;
  1845. return (0);
  1846. }
  1847. if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
  1848. /* We can't have a switched order with an unordered chunk */
  1849. SCTP_SNPRINTF(msg, sizeof(msg),
  1850. "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
  1851. tsn);
  1852. goto err_out;
  1853. }
  1854. if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
  1855. /* We can't have a switched unordered with a ordered chunk */
  1856. SCTP_SNPRINTF(msg, sizeof(msg),
  1857. "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
  1858. tsn);
  1859. goto err_out;
  1860. }
  1861. }
  1862. } else {
  1863. /* Its a complete segment. Lets validate we
  1864. * don't have a re-assembly going on with
  1865. * the same Stream/Seq (for ordered) or in
  1866. * the same Stream for unordered.
  1867. */
  1868. if (control != NULL) {
  1869. if (ordered || asoc->idata_supported) {
  1870. SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
  1871. chk_flags, mid);
  1872. SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
  1873. goto err_out;
  1874. } else {
  1875. if ((tsn == control->fsn_included + 1) &&
  1876. (control->end_added == 0)) {
  1877. SCTP_SNPRINTF(msg, sizeof(msg),
  1878. "Illegal message sequence, missing end for MID: %8.8x",
  1879. control->fsn_included);
  1880. goto err_out;
  1881. } else {
  1882. control = NULL;
  1883. }
  1884. }
  1885. }
  1886. }
  1887. /* now do the tests */
  1888. if (((asoc->cnt_on_all_streams +
  1889. asoc->cnt_on_reasm_queue +
  1890. asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
  1891. (((int)asoc->my_rwnd) <= 0)) {
  1892. /*
  1893. * When we have NO room in the rwnd we check to make sure
  1894. * the reader is doing its job...
  1895. */
  1896. if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
  1897. /* some to read, wake-up */
  1898. #if defined(__APPLE__) && !defined(__Userspace__)
  1899. struct socket *so;
  1900. so = SCTP_INP_SO(stcb->sctp_ep);
  1901. atomic_add_int(&stcb->asoc.refcnt, 1);
  1902. SCTP_TCB_UNLOCK(stcb);
  1903. SCTP_SOCKET_LOCK(so, 1);
  1904. SCTP_TCB_LOCK(stcb);
  1905. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1906. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  1907. /* assoc was freed while we were unlocked */
  1908. SCTP_SOCKET_UNLOCK(so, 1);
  1909. return (0);
  1910. }
  1911. #endif
  1912. sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
  1913. #if defined(__APPLE__) && !defined(__Userspace__)
  1914. SCTP_SOCKET_UNLOCK(so, 1);
  1915. #endif
  1916. }
  1917. /* now is it in the mapping array of what we have accepted? */
  1918. if (chk_type == SCTP_DATA) {
  1919. if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
  1920. SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
  1921. /* Nope not in the valid range dump it */
  1922. dump_packet:
  1923. sctp_set_rwnd(stcb, asoc);
  1924. if ((asoc->cnt_on_all_streams +
  1925. asoc->cnt_on_reasm_queue +
  1926. asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
  1927. SCTP_STAT_INCR(sctps_datadropchklmt);
  1928. } else {
  1929. SCTP_STAT_INCR(sctps_datadroprwnd);
  1930. }
  1931. *break_flag = 1;
  1932. return (0);
  1933. }
  1934. } else {
  1935. if (control == NULL) {
  1936. goto dump_packet;
  1937. }
  1938. if (SCTP_TSN_GT(fsn, control->top_fsn)) {
  1939. goto dump_packet;
  1940. }
  1941. }
  1942. }
  1943. #ifdef SCTP_ASOCLOG_OF_TSNS
  1944. SCTP_TCB_LOCK_ASSERT(stcb);
  1945. if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
  1946. asoc->tsn_in_at = 0;
  1947. asoc->tsn_in_wrapped = 1;
  1948. }
  1949. asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
  1950. asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
  1951. asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
  1952. asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
  1953. asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
  1954. asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
  1955. asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
  1956. asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
  1957. asoc->tsn_in_at++;
  1958. #endif
  1959. /*
  1960. * Before we continue lets validate that we are not being fooled by
  1961. * an evil attacker. We can only have Nk chunks based on our TSN
  1962. * spread allowed by the mapping array N * 8 bits, so there is no
  1963. * way our stream sequence numbers could have wrapped. We of course
  1964. * only validate the FIRST fragment so the bit must be set.
  1965. */
  1966. if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
  1967. (TAILQ_EMPTY(&asoc->resetHead)) &&
  1968. (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
  1969. SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
  1970. /* The incoming sseq is behind where we last delivered? */
  1971. SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
  1972. mid, asoc->strmin[sid].last_mid_delivered);
  1973. if (asoc->idata_supported) {
  1974. SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
  1975. asoc->strmin[sid].last_mid_delivered,
  1976. tsn,
  1977. sid,
  1978. mid);
  1979. } else {
  1980. SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
  1981. (uint16_t)asoc->strmin[sid].last_mid_delivered,
  1982. tsn,
  1983. sid,
  1984. (uint16_t)mid);
  1985. }
  1986. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  1987. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
  1988. sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  1989. *abort_flag = 1;
  1990. return (0);
  1991. }
  1992. if (chk_type == SCTP_IDATA) {
  1993. the_len = (chk_length - sizeof(struct sctp_idata_chunk));
  1994. } else {
  1995. the_len = (chk_length - sizeof(struct sctp_data_chunk));
  1996. }
  1997. if (last_chunk == 0) {
  1998. if (chk_type == SCTP_IDATA) {
  1999. dmbuf = SCTP_M_COPYM(*m,
  2000. (offset + sizeof(struct sctp_idata_chunk)),
  2001. the_len, M_NOWAIT);
  2002. } else {
  2003. dmbuf = SCTP_M_COPYM(*m,
  2004. (offset + sizeof(struct sctp_data_chunk)),
  2005. the_len, M_NOWAIT);
  2006. }
  2007. #ifdef SCTP_MBUF_LOGGING
  2008. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
  2009. sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
  2010. }
  2011. #endif
  2012. } else {
  2013. /* We can steal the last chunk */
  2014. int l_len;
  2015. dmbuf = *m;
  2016. /* lop off the top part */
  2017. if (chk_type == SCTP_IDATA) {
  2018. m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
  2019. } else {
  2020. m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
  2021. }
  2022. if (SCTP_BUF_NEXT(dmbuf) == NULL) {
  2023. l_len = SCTP_BUF_LEN(dmbuf);
  2024. } else {
  2025. /* need to count up the size hopefully
  2026. * does not hit this to often :-0
  2027. */
  2028. struct mbuf *lat;
  2029. l_len = 0;
  2030. for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
  2031. l_len += SCTP_BUF_LEN(lat);
  2032. }
  2033. }
  2034. if (l_len > the_len) {
  2035. /* Trim the end round bytes off too */
  2036. m_adj(dmbuf, -(l_len - the_len));
  2037. }
  2038. }
  2039. if (dmbuf == NULL) {
  2040. SCTP_STAT_INCR(sctps_nomem);
  2041. return (0);
  2042. }
  2043. /*
  2044. * Now no matter what, we need a control, get one
  2045. * if we don't have one (we may have gotten it
  2046. * above when we found the message was fragmented
  2047. */
  2048. if (control == NULL) {
  2049. sctp_alloc_a_readq(stcb, control);
  2050. sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
  2051. ppid,
  2052. sid,
  2053. chk_flags,
  2054. NULL, fsn, mid);
  2055. if (control == NULL) {
  2056. SCTP_STAT_INCR(sctps_nomem);
  2057. return (0);
  2058. }
  2059. if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
  2060. struct mbuf *mm;
  2061. control->data = dmbuf;
  2062. control->tail_mbuf = NULL;
  2063. for (mm = control->data; mm; mm = mm->m_next) {
  2064. control->length += SCTP_BUF_LEN(mm);
  2065. if (SCTP_BUF_NEXT(mm) == NULL) {
  2066. control->tail_mbuf = mm;
  2067. }
  2068. }
  2069. control->end_added = 1;
  2070. control->last_frag_seen = 1;
  2071. control->first_frag_seen = 1;
  2072. control->fsn_included = fsn;
  2073. control->top_fsn = fsn;
  2074. }
  2075. created_control = 1;
  2076. }
  2077. SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
  2078. chk_flags, ordered, mid, control);
  2079. if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
  2080. TAILQ_EMPTY(&asoc->resetHead) &&
  2081. ((ordered == 0) ||
  2082. (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
  2083. TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
  2084. /* Candidate for express delivery */
  2085. /*
  2086. * Its not fragmented, No PD-API is up, Nothing in the
  2087. * delivery queue, Its un-ordered OR ordered and the next to
  2088. * deliver AND nothing else is stuck on the stream queue,
  2089. * And there is room for it in the socket buffer. Lets just
  2090. * stuff it up the buffer....
  2091. */
  2092. SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
  2093. if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
  2094. asoc->highest_tsn_inside_nr_map = tsn;
  2095. }
  2096. SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
  2097. control, mid);
  2098. sctp_add_to_readq(stcb->sctp_ep, stcb,
  2099. control, &stcb->sctp_socket->so_rcv,
  2100. 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
  2101. if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
  2102. /* for ordered, bump what we delivered */
  2103. asoc->strmin[sid].last_mid_delivered++;
  2104. }
  2105. SCTP_STAT_INCR(sctps_recvexpress);
  2106. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
  2107. sctp_log_strm_del_alt(stcb, tsn, mid, sid,
  2108. SCTP_STR_LOG_FROM_EXPRS_DEL);
  2109. }
  2110. control = NULL;
  2111. goto finish_express_del;
  2112. }
  2113. /* Now will we need a chunk too? */
  2114. if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
  2115. sctp_alloc_a_chunk(stcb, chk);
  2116. if (chk == NULL) {
  2117. /* No memory so we drop the chunk */
  2118. SCTP_STAT_INCR(sctps_nomem);
  2119. if (last_chunk == 0) {
  2120. /* we copied it, free the copy */
  2121. sctp_m_freem(dmbuf);
  2122. }
  2123. return (0);
  2124. }
  2125. chk->rec.data.tsn = tsn;
  2126. chk->no_fr_allowed = 0;
  2127. chk->rec.data.fsn = fsn;
  2128. chk->rec.data.mid = mid;
  2129. chk->rec.data.sid = sid;
  2130. chk->rec.data.ppid = ppid;
  2131. chk->rec.data.context = stcb->asoc.context;
  2132. chk->rec.data.doing_fast_retransmit = 0;
  2133. chk->rec.data.rcv_flags = chk_flags;
  2134. chk->asoc = asoc;
  2135. chk->send_size = the_len;
  2136. chk->whoTo = net;
  2137. SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
  2138. chk,
  2139. control, mid);
  2140. atomic_add_int(&net->ref_count, 1);
  2141. chk->data = dmbuf;
  2142. }
  2143. /* Set the appropriate TSN mark */
  2144. if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
  2145. SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
  2146. if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
  2147. asoc->highest_tsn_inside_nr_map = tsn;
  2148. }
  2149. } else {
  2150. SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
  2151. if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
  2152. asoc->highest_tsn_inside_map = tsn;
  2153. }
  2154. }
  2155. /* Now is it complete (i.e. not fragmented)? */
  2156. if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
  2157. /*
  2158. * Special check for when streams are resetting. We
  2159. * could be more smart about this and check the
  2160. * actual stream to see if it is not being reset..
  2161. * that way we would not create a HOLB when amongst
  2162. * streams being reset and those not being reset.
  2163. *
  2164. */
  2165. if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
  2166. SCTP_TSN_GT(tsn, liste->tsn)) {
  2167. /*
  2168. * yep its past where we need to reset... go
  2169. * ahead and queue it.
  2170. */
  2171. if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
  2172. /* first one on */
  2173. TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
  2174. } else {
  2175. struct sctp_queued_to_read *lcontrol, *nlcontrol;
  2176. unsigned char inserted = 0;
  2177. TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
  2178. if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
  2179. continue;
  2180. } else {
  2181. /* found it */
  2182. TAILQ_INSERT_BEFORE(lcontrol, control, next);
  2183. inserted = 1;
  2184. break;
  2185. }
  2186. }
  2187. if (inserted == 0) {
  2188. /*
  2189. * must be put at end, use
  2190. * prevP (all setup from
  2191. * loop) to setup nextP.
  2192. */
  2193. TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
  2194. }
  2195. }
  2196. goto finish_express_del;
  2197. }
  2198. if (chk_flags & SCTP_DATA_UNORDERED) {
  2199. /* queue directly into socket buffer */
  2200. SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
  2201. control, mid);
  2202. sctp_mark_non_revokable(asoc, control->sinfo_tsn);
  2203. sctp_add_to_readq(stcb->sctp_ep, stcb,
  2204. control,
  2205. &stcb->sctp_socket->so_rcv, 1,
  2206. SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
  2207. } else {
  2208. SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
  2209. mid);
  2210. sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
  2211. if (*abort_flag) {
  2212. if (last_chunk) {
  2213. *m = NULL;
  2214. }
  2215. return (0);
  2216. }
  2217. }
  2218. goto finish_express_del;
  2219. }
  2220. /* If we reach here its a reassembly */
  2221. need_reasm_check = 1;
  2222. SCTPDBG(SCTP_DEBUG_XXX,
  2223. "Queue data to stream for reasm control: %p MID: %u\n",
  2224. control, mid);
  2225. sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
  2226. if (*abort_flag) {
  2227. /*
  2228. * the assoc is now gone and chk was put onto the
  2229. * reasm queue, which has all been freed.
  2230. */
  2231. if (last_chunk) {
  2232. *m = NULL;
  2233. }
  2234. return (0);
  2235. }
  2236. finish_express_del:
  2237. /* Here we tidy up things */
  2238. if (tsn == (asoc->cumulative_tsn + 1)) {
  2239. /* Update cum-ack */
  2240. asoc->cumulative_tsn = tsn;
  2241. }
  2242. if (last_chunk) {
  2243. *m = NULL;
  2244. }
  2245. if (ordered) {
  2246. SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
  2247. } else {
  2248. SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
  2249. }
  2250. SCTP_STAT_INCR(sctps_recvdata);
  2251. /* Set it present please */
  2252. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
  2253. sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
  2254. }
  2255. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
  2256. sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
  2257. asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
  2258. }
  2259. if (need_reasm_check) {
  2260. (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
  2261. need_reasm_check = 0;
  2262. }
  2263. /* check the special flag for stream resets */
  2264. if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
  2265. SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
  2266. /*
  2267. * we have finished working through the backlogged TSN's now
  2268. * time to reset streams. 1: call reset function. 2: free
  2269. * pending_reply space 3: distribute any chunks in
  2270. * pending_reply_queue.
  2271. */
  2272. sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
  2273. TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
  2274. sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
  2275. SCTP_FREE(liste, SCTP_M_STRESET);
  2276. /*sa_ignore FREED_MEMORY*/
  2277. liste = TAILQ_FIRST(&asoc->resetHead);
  2278. if (TAILQ_EMPTY(&asoc->resetHead)) {
  2279. /* All can be removed */
  2280. TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
  2281. TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
  2282. strm = &asoc->strmin[control->sinfo_stream];
  2283. sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
  2284. if (*abort_flag) {
  2285. return (0);
  2286. }
  2287. if (need_reasm_check) {
  2288. (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
  2289. need_reasm_check = 0;
  2290. }
  2291. }
  2292. } else {
  2293. TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
  2294. if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
  2295. break;
  2296. }
  2297. /*
  2298. * if control->sinfo_tsn is <= liste->tsn we can
  2299. * process it which is the NOT of
  2300. * control->sinfo_tsn > liste->tsn
  2301. */
  2302. TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
  2303. strm = &asoc->strmin[control->sinfo_stream];
  2304. sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
  2305. if (*abort_flag) {
  2306. return (0);
  2307. }
  2308. if (need_reasm_check) {
  2309. (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
  2310. need_reasm_check = 0;
  2311. }
  2312. }
  2313. }
  2314. }
  2315. return (1);
  2316. }
/*
 * Lookup table used when sliding the mapping arrays: for each possible
 * byte value of the (OR-ed) mapping array, gives the number of
 * consecutive 1-bits starting at the least significant bit (the count
 * of trailing one-bits).  E.g. tab[0x01] == 1, tab[0x03] == 2,
 * tab[0x02] == 0, tab[0xff] == 8.  The caller short-circuits the 0xff
 * case and only consults this table for bytes containing a 0 bit.
 */
static const int8_t sctp_map_lookup_tab[256] = {
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 7,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 6,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 5,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 4,
	0, 1, 0, 2, 0, 1, 0, 3,
	0, 1, 0, 2, 0, 1, 0, 8
};
/*
 * Recompute the cumulative TSN from the mapping arrays and, when the
 * leading bytes of the arrays are fully acked, slide both arrays down
 * (or clear them entirely) so their base advances past the cum-ack.
 */
void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think that all entries
	 * that make up the position of the cum-ack would be in the
	 * nr-mapping array only.. i.e. things up to the cum-ack are
	 * always deliverable.  Thats true with one exception, when its a
	 * fragmented message we may not deliver the data until some
	 * threshold (or all of it) is in place.  So we must OR the
	 * nr_mapping_array and mapping_array to get a true picture of
	 * the cum-ack.
	 */
	struct sctp_association *asoc;
	int at;
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;
	/* Snapshot the pre-slide state for map logging below. */
	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	/*
	 * Count the run of contiguous 1-bits from the base of the OR-ed
	 * maps; 'at' ends up as the number of consecutively-received TSNs
	 * and slide_from as the index of the first byte with a 0 bit.
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
	/* The new cum-ack is the last TSN of that contiguous run. */
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
		/* Internal inconsistency: cum-ack beyond both map ceilings. */
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		/* Best-effort repair: pull both ceilings back to the cum-ack. */
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	/* The overall highest TSN seen is the larger of the two ceilings. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/* The complete array was completed by a single FR */
		/* highest becomes the cum-ack */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif
		/* clear the array: number of whole bytes covered by 'at' bits */
		clr = ((at + 7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		/* Sanity: after the clear no bit may remain set anywhere. */
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		/* Restart both arrays just past the cum-ack. */
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first NON 0xff byte */
		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);
		if (slide_end < slide_from) {
			/* Cannot happen if the maps are consistent. */
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
				    lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
				    asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		/* Number of bytes that must survive the slide. */
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
				     SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
				     (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide forward the array so that
			 * hopefully when more data comes in to fill it up
			 * we will be able to slide it forward. Really I
			 * don't think this should happen :-0
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
					     (uint32_t)asoc->mapping_array_size,
					     SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			/* Move the surviving bytes to the front of both maps. */
			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
			}
			/* Zero everything behind the moved region. */
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			/*
			 * Keep a ceiling that sat exactly one TSN below the
			 * old base in step with the new base.
			 */
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			/* Advance the base by the slid-off bits (8 per byte). */
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
					     SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}
  2511. void
  2512. sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
  2513. {
  2514. struct sctp_association *asoc;
  2515. uint32_t highest_tsn;
  2516. int is_a_gap;
  2517. sctp_slide_mapping_arrays(stcb);
  2518. asoc = &stcb->asoc;
  2519. if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
  2520. highest_tsn = asoc->highest_tsn_inside_nr_map;
  2521. } else {
  2522. highest_tsn = asoc->highest_tsn_inside_map;
  2523. }
  2524. /* Is there a gap now? */
  2525. is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
  2526. /*
  2527. * Now we need to see if we need to queue a sack or just start the
  2528. * timer (if allowed).
  2529. */
  2530. if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
  2531. /*
  2532. * Ok special case, in SHUTDOWN-SENT case. here we
  2533. * maker sure SACK timer is off and instead send a
  2534. * SHUTDOWN and a SACK
  2535. */
  2536. if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
  2537. sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
  2538. stcb->sctp_ep, stcb, NULL,
  2539. SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
  2540. }
  2541. sctp_send_shutdown(stcb,
  2542. ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
  2543. if (is_a_gap) {
  2544. sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
  2545. }
  2546. } else {
  2547. /*
  2548. * CMT DAC algorithm: increase number of packets
  2549. * received since last ack
  2550. */
  2551. stcb->asoc.cmt_dac_pkts_rcvd++;
  2552. if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */
  2553. ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
  2554. * longer is one */
  2555. (stcb->asoc.numduptsns) || /* we have dup's */
  2556. (is_a_gap) || /* is still a gap */
  2557. (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
  2558. (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
  2559. if ((stcb->asoc.sctp_cmt_on_off > 0) &&
  2560. (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
  2561. (stcb->asoc.send_sack == 0) &&
  2562. (stcb->asoc.numduptsns == 0) &&
  2563. (stcb->asoc.delayed_ack) &&
  2564. (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
  2565. /*
  2566. * CMT DAC algorithm: With CMT,
  2567. * delay acks even in the face of
  2568. * reordering. Therefore, if acks
  2569. * that do not have to be sent
  2570. * because of the above reasons,
  2571. * will be delayed. That is, acks
  2572. * that would have been sent due to
  2573. * gap reports will be delayed with
  2574. * DAC. Start the delayed ack timer.
  2575. */
  2576. sctp_timer_start(SCTP_TIMER_TYPE_RECV,
  2577. stcb->sctp_ep, stcb, NULL);
  2578. } else {
  2579. /*
  2580. * Ok we must build a SACK since the
  2581. * timer is pending, we got our
  2582. * first packet OR there are gaps or
  2583. * duplicates.
  2584. */
  2585. sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
  2586. SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
  2587. sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
  2588. }
  2589. } else {
  2590. if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
  2591. sctp_timer_start(SCTP_TIMER_TYPE_RECV,
  2592. stcb->sctp_ep, stcb, NULL);
  2593. }
  2594. }
  2595. }
  2596. }
  2597. int
  2598. sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
  2599. struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  2600. struct sctp_nets *net, uint32_t *high_tsn)
  2601. {
  2602. struct sctp_chunkhdr *ch, chunk_buf;
  2603. struct sctp_association *asoc;
  2604. int num_chunks = 0; /* number of control chunks processed */
  2605. int stop_proc = 0;
  2606. int break_flag, last_chunk;
  2607. int abort_flag = 0, was_a_gap;
  2608. struct mbuf *m;
  2609. uint32_t highest_tsn;
  2610. uint16_t chk_length;
  2611. /* set the rwnd */
  2612. sctp_set_rwnd(stcb, &stcb->asoc);
  2613. m = *mm;
  2614. SCTP_TCB_LOCK_ASSERT(stcb);
  2615. asoc = &stcb->asoc;
  2616. if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
  2617. highest_tsn = asoc->highest_tsn_inside_nr_map;
  2618. } else {
  2619. highest_tsn = asoc->highest_tsn_inside_map;
  2620. }
  2621. was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
  2622. /*
  2623. * setup where we got the last DATA packet from for any SACK that
  2624. * may need to go out. Don't bump the net. This is done ONLY when a
  2625. * chunk is assigned.
  2626. */
  2627. asoc->last_data_chunk_from = net;
  2628. /*-
  2629. * Now before we proceed we must figure out if this is a wasted
  2630. * cluster... i.e. it is a small packet sent in and yet the driver
  2631. * underneath allocated a full cluster for it. If so we must copy it
  2632. * to a smaller mbuf and free up the cluster mbuf. This will help
  2633. * with cluster starvation.
  2634. */
  2635. if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
  2636. /* we only handle mbufs that are singletons.. not chains */
  2637. m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
  2638. if (m) {
  2639. /* ok lets see if we can copy the data up */
  2640. caddr_t *from, *to;
  2641. /* get the pointers and copy */
  2642. to = mtod(m, caddr_t *);
  2643. from = mtod((*mm), caddr_t *);
  2644. memcpy(to, from, SCTP_BUF_LEN((*mm)));
  2645. /* copy the length and free up the old */
  2646. SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
  2647. sctp_m_freem(*mm);
  2648. /* success, back copy */
  2649. *mm = m;
  2650. } else {
  2651. /* We are in trouble in the mbuf world .. yikes */
  2652. m = *mm;
  2653. }
  2654. }
  2655. /* get pointer to the first chunk header */
  2656. ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
  2657. sizeof(struct sctp_chunkhdr),
  2658. (uint8_t *)&chunk_buf);
  2659. if (ch == NULL) {
  2660. return (1);
  2661. }
  2662. /*
  2663. * process all DATA chunks...
  2664. */
  2665. *high_tsn = asoc->cumulative_tsn;
  2666. break_flag = 0;
  2667. asoc->data_pkts_seen++;
  2668. while (stop_proc == 0) {
  2669. /* validate chunk length */
  2670. chk_length = ntohs(ch->chunk_length);
  2671. if (length - *offset < chk_length) {
  2672. /* all done, mutulated chunk */
  2673. stop_proc = 1;
  2674. continue;
  2675. }
  2676. if ((asoc->idata_supported == 1) &&
  2677. (ch->chunk_type == SCTP_DATA)) {
  2678. struct mbuf *op_err;
  2679. char msg[SCTP_DIAG_INFO_LEN];
  2680. SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
  2681. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  2682. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
  2683. sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  2684. return (2);
  2685. }
  2686. if ((asoc->idata_supported == 0) &&
  2687. (ch->chunk_type == SCTP_IDATA)) {
  2688. struct mbuf *op_err;
  2689. char msg[SCTP_DIAG_INFO_LEN];
  2690. SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
  2691. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  2692. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
  2693. sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  2694. return (2);
  2695. }
  2696. if ((ch->chunk_type == SCTP_DATA) ||
  2697. (ch->chunk_type == SCTP_IDATA)) {
  2698. uint16_t clen;
  2699. if (ch->chunk_type == SCTP_DATA) {
  2700. clen = sizeof(struct sctp_data_chunk);
  2701. } else {
  2702. clen = sizeof(struct sctp_idata_chunk);
  2703. }
  2704. if (chk_length < clen) {
  2705. /*
  2706. * Need to send an abort since we had a
  2707. * invalid data chunk.
  2708. */
  2709. struct mbuf *op_err;
  2710. char msg[SCTP_DIAG_INFO_LEN];
  2711. SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
  2712. ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
  2713. chk_length);
  2714. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  2715. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
  2716. sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  2717. return (2);
  2718. }
  2719. #ifdef SCTP_AUDITING_ENABLED
  2720. sctp_audit_log(0xB1, 0);
  2721. #endif
  2722. if (SCTP_SIZE32(chk_length) == (length - *offset)) {
  2723. last_chunk = 1;
  2724. } else {
  2725. last_chunk = 0;
  2726. }
  2727. if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
  2728. chk_length, net, high_tsn, &abort_flag, &break_flag,
  2729. last_chunk, ch->chunk_type)) {
  2730. num_chunks++;
  2731. }
  2732. if (abort_flag)
  2733. return (2);
  2734. if (break_flag) {
  2735. /*
  2736. * Set because of out of rwnd space and no
  2737. * drop rep space left.
  2738. */
  2739. stop_proc = 1;
  2740. continue;
  2741. }
  2742. } else {
  2743. /* not a data chunk in the data region */
  2744. switch (ch->chunk_type) {
  2745. case SCTP_INITIATION:
  2746. case SCTP_INITIATION_ACK:
  2747. case SCTP_SELECTIVE_ACK:
  2748. case SCTP_NR_SELECTIVE_ACK:
  2749. case SCTP_HEARTBEAT_REQUEST:
  2750. case SCTP_HEARTBEAT_ACK:
  2751. case SCTP_ABORT_ASSOCIATION:
  2752. case SCTP_SHUTDOWN:
  2753. case SCTP_SHUTDOWN_ACK:
  2754. case SCTP_OPERATION_ERROR:
  2755. case SCTP_COOKIE_ECHO:
  2756. case SCTP_COOKIE_ACK:
  2757. case SCTP_ECN_ECHO:
  2758. case SCTP_ECN_CWR:
  2759. case SCTP_SHUTDOWN_COMPLETE:
  2760. case SCTP_AUTHENTICATION:
  2761. case SCTP_ASCONF_ACK:
  2762. case SCTP_PACKET_DROPPED:
  2763. case SCTP_STREAM_RESET:
  2764. case SCTP_FORWARD_CUM_TSN:
  2765. case SCTP_ASCONF:
  2766. {
  2767. /*
  2768. * Now, what do we do with KNOWN chunks that
  2769. * are NOT in the right place?
  2770. *
  2771. * For now, I do nothing but ignore them. We
  2772. * may later want to add sysctl stuff to
  2773. * switch out and do either an ABORT() or
  2774. * possibly process them.
  2775. */
  2776. struct mbuf *op_err;
  2777. char msg[SCTP_DIAG_INFO_LEN];
  2778. SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
  2779. ch->chunk_type);
  2780. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  2781. sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  2782. return (2);
  2783. }
  2784. default:
  2785. /*
  2786. * Unknown chunk type: use bit rules after
  2787. * checking length
  2788. */
  2789. if (chk_length < sizeof(struct sctp_chunkhdr)) {
  2790. /*
  2791. * Need to send an abort since we had a
  2792. * invalid chunk.
  2793. */
  2794. struct mbuf *op_err;
  2795. char msg[SCTP_DIAG_INFO_LEN];
  2796. SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
  2797. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  2798. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
  2799. sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  2800. return (2);
  2801. }
  2802. if (ch->chunk_type & 0x40) {
  2803. /* Add a error report to the queue */
  2804. struct mbuf *op_err;
  2805. struct sctp_gen_error_cause *cause;
  2806. op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
  2807. 0, M_NOWAIT, 1, MT_DATA);
  2808. if (op_err != NULL) {
  2809. cause = mtod(op_err, struct sctp_gen_error_cause *);
  2810. cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
  2811. cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
  2812. SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
  2813. SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
  2814. if (SCTP_BUF_NEXT(op_err) != NULL) {
  2815. sctp_queue_op_err(stcb, op_err);
  2816. } else {
  2817. sctp_m_freem(op_err);
  2818. }
  2819. }
  2820. }
  2821. if ((ch->chunk_type & 0x80) == 0) {
  2822. /* discard the rest of this packet */
  2823. stop_proc = 1;
  2824. } /* else skip this bad chunk and
  2825. * continue... */
  2826. break;
  2827. } /* switch of chunk type */
  2828. }
  2829. *offset += SCTP_SIZE32(chk_length);
  2830. if ((*offset >= length) || stop_proc) {
  2831. /* no more data left in the mbuf chain */
  2832. stop_proc = 1;
  2833. continue;
  2834. }
  2835. ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
  2836. sizeof(struct sctp_chunkhdr),
  2837. (uint8_t *)&chunk_buf);
  2838. if (ch == NULL) {
  2839. *offset = length;
  2840. stop_proc = 1;
  2841. continue;
  2842. }
  2843. }
  2844. if (break_flag) {
  2845. /*
  2846. * we need to report rwnd overrun drops.
  2847. */
  2848. sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
  2849. }
  2850. if (num_chunks) {
  2851. /*
  2852. * Did we get data, if so update the time for auto-close and
  2853. * give peer credit for being alive.
  2854. */
  2855. SCTP_STAT_INCR(sctps_recvpktwithdata);
  2856. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
  2857. sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
  2858. stcb->asoc.overall_error_count,
  2859. 0,
  2860. SCTP_FROM_SCTP_INDATA,
  2861. __LINE__);
  2862. }
  2863. stcb->asoc.overall_error_count = 0;
  2864. (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
  2865. }
  2866. /* now service all of the reassm queue if needed */
  2867. if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
  2868. /* Assure that we ack right away */
  2869. stcb->asoc.send_sack = 1;
  2870. }
  2871. /* Start a sack timer or QUEUE a SACK for sending */
  2872. sctp_sack_check(stcb, was_a_gap);
  2873. return (0);
  2874. }
/*
 * Mark every sent chunk whose TSN falls inside one SACK Gap Ack Block,
 * i.e. the TSN range [last_tsn + frag_strt, last_tsn + frag_end].
 *
 * *p_tp1 is a cursor into stcb->asoc.sent_queue that the caller threads
 * through successive fragments so in-order gap blocks resume the scan
 * instead of restarting it; it is updated on return.  nr_sacking is
 * non-zero for NR-SACK (non-revocable) blocks, in which case acked
 * chunks are promoted to SCTP_DATAGRAM_NR_ACKED and their data freed.
 *
 * Side effects visible below: updates *num_frs, *biggest_newly_acked_tsn,
 * *this_sack_lowest_newack and *rto_ok, maintains the per-destination
 * CMT pseudo-cumack / SFR trackers, and adjusts flight size and net_ack
 * accounting for newly acked chunks.
 *
 * Returns non-zero iff at least one chunk's data was freed here
 * (only meaningful for NR-SACK processing, see the return comment).
 */
static int
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    int *num_frs,
    uint32_t *biggest_newly_acked_tsn,
    uint32_t *this_sack_lowest_newack,
    int *rto_ok)
{
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = *p_tp1;
	if (tp1 == NULL) {
		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	}
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = j + last_tsn;
		while (tp1) {
			if (tp1->rec.data.doing_fast_retransmit)
				(*num_frs) += 1;

			/*-
			 * CMT: CUCv2 algorithm. For each TSN being
			 * processed from the sent queue, track the
			 * next expected pseudo-cumack, or
			 * rtx_pseudo_cumack, if required. Separate
			 * cumack trackers for first transmissions,
			 * and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_pseudo_cumack = 0;
			}
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			}
			if (tp1->rec.data.tsn == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*-
					 * must be held until
					 * cum-ack passes
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*-
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked. update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;

						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.tsn;
						}
						/*-
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    last_tsn,
								    tp1->rec.data.tsn,
								    0,
								    0,
								    SCTP_LOG_TSN_ACKED);
							}
							*this_sack_lowest_newack = tp1->rec.data.tsn;
						}
						/*-
						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
						}
						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							}
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    last_tsn,
							    tp1->rec.data.tsn,
							    frag_strt,
							    frag_end,
							    SCTP_LOG_TSN_ACKED);
						}
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uint32_t)(uintptr_t)tp1->whoTo,
							    tp1->rec.data.tsn);
						}
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							    tp1);
						}
						sctp_total_flight_decrease(stcb, tp1);

						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*-
							 * True non-retransmitted chunk
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;

							/*-
							 * update RTO too ?
							 */
							if (tp1->do_rtt) {
								/* A first RTT sample may consume the rto_ok budget. */
								if (*rto_ok &&
								    sctp_calculate_rto(stcb,
								    &stcb->asoc,
								    tp1->whoTo,
								    &tp1->sent_rcv_time,
								    SCTP_RTT_FROM_DATA)) {
									*rto_ok = 0;
								}
								if (tp1->whoTo->rto_needed == 0) {
									tp1->whoTo->rto_needed = 1;
								}
								tp1->do_rtt = 0;
							}
						}
					}
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						if (SCTP_TSN_GT(tp1->rec.data.tsn,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.tsn;
						}
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
#endif
						}
					}
					/*-
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					}
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					}
					/* NR Sack code here */
					if (nr_sacking &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
						} else {
							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
						}
						/* A drained stream that is reset-pending can now trigger the reset. */
						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
							stcb->asoc.trigger_reset = 1;
						}
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
							sctp_m_freem(tp1->data);
							tp1->data = NULL;
						}
						wake_him++;
					}
				}
				break;
			}	/* if (tp1->tsn == theTSN) */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
				/* Passed the target TSN; it is not on the queue. */
				break;
			}
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			/* Wrap around once per TSN in case the queue is not TSN-ordered. */
			if ((tp1 == NULL) && (circled == 0)) {
				circled++;
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			}
		}	/* end while (tp1) */
		if (tp1 == NULL) {
			circled = 0;
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}
		/* In case the fragments were not in order we must reset */
	}	/* end for (j = fragStart */
	*p_tp1 = tp1;
	return (wake_him);	/* Return value only used for nr-sack */
}
  3104. static int
  3105. sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
  3106. uint32_t last_tsn, uint32_t *biggest_tsn_acked,
  3107. uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
  3108. int num_seg, int num_nr_seg, int *rto_ok)
  3109. {
  3110. struct sctp_gap_ack_block *frag, block;
  3111. struct sctp_tmit_chunk *tp1;
  3112. int i;
  3113. int num_frs = 0;
  3114. int chunk_freed;
  3115. int non_revocable;
  3116. uint16_t frag_strt, frag_end, prev_frag_end;
  3117. tp1 = TAILQ_FIRST(&asoc->sent_queue);
  3118. prev_frag_end = 0;
  3119. chunk_freed = 0;
  3120. for (i = 0; i < (num_seg + num_nr_seg); i++) {
  3121. if (i == num_seg) {
  3122. prev_frag_end = 0;
  3123. tp1 = TAILQ_FIRST(&asoc->sent_queue);
  3124. }
  3125. frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
  3126. sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
  3127. *offset += sizeof(block);
  3128. if (frag == NULL) {
  3129. return (chunk_freed);
  3130. }
  3131. frag_strt = ntohs(frag->start);
  3132. frag_end = ntohs(frag->end);
  3133. if (frag_strt > frag_end) {
  3134. /* This gap report is malformed, skip it. */
  3135. continue;
  3136. }
  3137. if (frag_strt <= prev_frag_end) {
  3138. /* This gap report is not in order, so restart. */
  3139. tp1 = TAILQ_FIRST(&asoc->sent_queue);
  3140. }
  3141. if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
  3142. *biggest_tsn_acked = last_tsn + frag_end;
  3143. }
  3144. if (i < num_seg) {
  3145. non_revocable = 0;
  3146. } else {
  3147. non_revocable = 1;
  3148. }
  3149. if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
  3150. non_revocable, &num_frs, biggest_newly_acked_tsn,
  3151. this_sack_lowest_newack, rto_ok)) {
  3152. chunk_freed = 1;
  3153. }
  3154. prev_frag_end = frag_end;
  3155. }
  3156. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
  3157. if (num_frs)
  3158. sctp_log_fr(*biggest_tsn_acked,
  3159. *biggest_newly_acked_tsn,
  3160. last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
  3161. }
  3162. return (chunk_freed);
  3163. }
  3164. static void
  3165. sctp_check_for_revoked(struct sctp_tcb *stcb,
  3166. struct sctp_association *asoc, uint32_t cumack,
  3167. uint32_t biggest_tsn_acked)
  3168. {
  3169. struct sctp_tmit_chunk *tp1;
  3170. TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
  3171. if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
  3172. /*
  3173. * ok this guy is either ACK or MARKED. If it is
  3174. * ACKED it has been previously acked but not this
  3175. * time i.e. revoked. If it is MARKED it was ACK'ed
  3176. * again.
  3177. */
  3178. if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
  3179. break;
  3180. }
  3181. if (tp1->sent == SCTP_DATAGRAM_ACKED) {
  3182. /* it has been revoked */
  3183. tp1->sent = SCTP_DATAGRAM_SENT;
  3184. tp1->rec.data.chunk_was_revoked = 1;
  3185. /* We must add this stuff back in to
  3186. * assure timers and such get started.
  3187. */
  3188. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
  3189. sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
  3190. tp1->whoTo->flight_size,
  3191. tp1->book_size,
  3192. (uint32_t)(uintptr_t)tp1->whoTo,
  3193. tp1->rec.data.tsn);
  3194. }
  3195. sctp_flight_size_increase(tp1);
  3196. sctp_total_flight_increase(stcb, tp1);
  3197. /* We inflate the cwnd to compensate for our
  3198. * artificial inflation of the flight_size.
  3199. */
  3200. tp1->whoTo->cwnd += tp1->book_size;
  3201. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
  3202. sctp_log_sack(asoc->last_acked_seq,
  3203. cumack,
  3204. tp1->rec.data.tsn,
  3205. 0,
  3206. 0,
  3207. SCTP_LOG_TSN_REVOKED);
  3208. }
  3209. } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
  3210. /* it has been re-acked in this SACK */
  3211. tp1->sent = SCTP_DATAGRAM_ACKED;
  3212. }
  3213. }
  3214. if (tp1->sent == SCTP_DATAGRAM_UNSENT)
  3215. break;
  3216. }
  3217. }
/*
 * Apply fast-retransmit "strikes" to chunks on the sent queue after a
 * SACK has been processed.  A chunk that is repeatedly missing (its
 * tp1->sent counter is incremented each time) eventually reaches
 * SCTP_DATAGRAM_RESEND and is scheduled for fast retransmission: its
 * flight/rwnd accounting is undone, an (optional) alternate destination
 * is chosen, and fast_retran_tsn is recorded to gate subsequent FRs.
 *
 * The striking decision honors, in order: PR-SCTP TTL expiry, the CMT
 * SFR per-destination checks, fast-recovery state (accum_moved), the
 * CMT DAC extra-strike rule, and the HTNA rule (never strike above
 * biggest_tsn_newly_acked).  See the inline comments for each.
 */
static void
sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
{
	struct sctp_tmit_chunk *tp1;
	int strike_flag = 0;
	struct timeval now;
	uint32_t sending_seq;
	struct sctp_nets *net;
	int num_dests_sacked = 0;

	/*
	 * select the sending_seq, this is either the next thing ready to be
	 * sent but not transmitted, OR, the next seq we assign.
	 */
	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (tp1 == NULL) {
		sending_seq = asoc->sending_seq;
	} else {
		sending_seq = tp1->rec.data.tsn;
	}

	/* CMT DAC algo: finding out if SACK is a mixed SACK */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->saw_newack)
				num_dests_sacked++;
		}
	}
	if (stcb->asoc.prsctp_supported) {
		/* Timestamp taken once; used below for PR-SCTP TTL expiry checks. */
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		strike_flag = 0;
		if (tp1->no_fr_allowed) {
			/* this one had a timeout or something */
			continue;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND)
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.tsn,
				    tp1->sent,
				    SCTP_FR_LOG_CHECK_STRIKE);
		}
		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
			/* done */
			break;
		}
		if (stcb->asoc.prsctp_supported) {
			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
				/* Is it expired? */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
					/* Yes so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
		}
		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
			/* we are beyond the tsn in the sack */
			break;
		}
		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
			/* either a RESEND, ACKED, or MARKED */
			/* skip */
			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
				/* Continue strikin FWD-TSN chunks */
				tp1->rec.data.fwd_tsn_cnt++;
			}
			continue;
		}
		/*
		 * CMT : SFR algo (covers part of DAC and HTNA as well)
		 */
		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
			/*
			 * No new acks were received for data sent to this
			 * dest. Therefore, according to the SFR algo for
			 * CMT, no data sent to this dest can be marked for
			 * FR using this SACK.
			 */
			continue;
		} else if (tp1->whoTo &&
		    SCTP_TSN_GT(tp1->rec.data.tsn,
		    tp1->whoTo->this_sack_highest_newack) &&
		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
			/*
			 * CMT: New acks were received for data sent to
			 * this dest. But no new acks were seen for data
			 * sent after tp1. Therefore, according to the SFR
			 * algo for CMT, tp1 cannot be marked for FR using
			 * this SACK. This step covers part of the DAC algo
			 * and the HTNA algo as well.
			 */
			continue;
		}
		/*
		 * Here we check to see if we were have already done a FR
		 * and if so we see if the biggest TSN we saw in the sack is
		 * smaller than the recovery point. If so we don't strike
		 * the tsn... otherwise we CAN strike the TSN.
		 */
		/*
		 * @@@ JRI: Check for CMT
		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
		 */
		if (accum_moved && asoc->fast_retran_loss_recovery) {
			/*
			 * Strike the TSN if in fast-recovery and cum-ack
			 * moved.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.tsn,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(16 + num_dests_sacked,
						    tp1->rec.data.tsn,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		} else if ((tp1->rec.data.doing_fast_retransmit) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * For those that have done a FR we must take
			 * special consideration if we strike. I.e the
			 * biggest_newly_acked must be higher than the
			 * sending_seq at the time we did the FR.
			 */
			if (
#ifdef SCTP_FR_TO_ALTERNATE
			    /*
			     * If FR's go to new networks, then we must only do
			     * this for singly homed asoc's. However if the FR's
			     * go to the same network (Armando's work) then its
			     * ok to FR multiple times.
			     */
			    (asoc->numnets < 2)
#else
			    (1)
#endif
			    ) {
				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
				    tp1->rec.data.fast_retran_tsn)) {
					/*
					 * Strike the TSN, since this ack is
					 * beyond where things were when we
					 * did a FR.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(biggest_tsn_newly_acked,
						    tp1->rec.data.tsn,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						tp1->sent++;
					}
					strike_flag = 1;
					if ((asoc->sctp_cmt_on_off > 0) &&
					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
						/*
						 * CMT DAC algorithm: If
						 * SACK flag is set to 0,
						 * then lowest_newack test
						 * will not pass because it
						 * would have been set to
						 * the cumack earlier. If
						 * not already to be rtx'd,
						 * If not a mixed sack and
						 * if tp1 is not between two
						 * sacked TSNs, then mark by
						 * one more.
						 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
						 * two packets have been received after this missing TSN.
						 */
						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
						    (num_dests_sacked == 1) &&
						    SCTP_TSN_GT(this_sack_lowest_newack,
						    tp1->rec.data.tsn)) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
								sctp_log_fr(32 + num_dests_sacked,
								    tp1->rec.data.tsn,
								    tp1->sent,
								    SCTP_FR_LOG_STRIKE_CHUNK);
							}
							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
								tp1->sent++;
							}
						}
					}
				}
			}
			/*
			 * JRI: TODO: remove code for HTNA algo. CMT's
			 * SFR algo covers HTNA.
			 */
		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
		    biggest_tsn_newly_acked)) {
			/*
			 * We don't strike these: This is the HTNA
			 * algorithm i.e. we don't strike If our TSN is
			 * larger than the Highest TSN Newly Acked.
			 */
			;
		} else {
			/* Strike the TSN */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(biggest_tsn_newly_acked,
				    tp1->rec.data.tsn,
				    tp1->sent,
				    SCTP_FR_LOG_STRIKE_CHUNK);
			}
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				tp1->sent++;
			}
			if ((asoc->sctp_cmt_on_off > 0) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
				/*
				 * CMT DAC algorithm: If SACK flag is set to
				 * 0, then lowest_newack test will not pass
				 * because it would have been set to the
				 * cumack earlier. If not already to be
				 * rtx'd, If not a mixed sack and if tp1 is
				 * not between two sacked TSNs, then mark by
				 * one more.
				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
				 * two packets have been received after this missing TSN.
				 */
				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
						sctp_log_fr(48 + num_dests_sacked,
						    tp1->rec.data.tsn,
						    tp1->sent,
						    SCTP_FR_LOG_STRIKE_CHUNK);
					}
					tp1->sent++;
				}
			}
		}
		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
			/* The chunk just crossed the strike threshold: set up its FR. */
			struct sctp_nets *alt;

			/* fix counts and things */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
				    tp1->book_size,
				    (uint32_t)(uintptr_t)tp1->whoTo,
				    tp1->rec.data.tsn);
			}
			if (tp1->whoTo) {
				tp1->whoTo->net_ack++;
				sctp_flight_size_decrease(tp1);
				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
					    tp1);
				}
			}

			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
			}
			/* add back to the rwnd */
			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));

			/* remove from the total flight */
			sctp_total_flight_decrease(stcb, tp1);

			if ((stcb->asoc.prsctp_supported) &&
			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
				/* Has it been retransmitted tv_sec times? - we store the retran count there. */
				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
					/* Yes, so drop it */
					if (tp1->data != NULL) {
						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
						    SCTP_SO_NOT_LOCKED);
					}
					/* Make sure to flag we had a FR */
					if (tp1->whoTo != NULL) {
						tp1->whoTo->net_ack++;
					}
					continue;
				}
			}
			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
				    0, SCTP_FR_MARKED);
			}
			if (strike_flag) {
				/* This is a subsequent FR */
				SCTP_STAT_INCR(sctps_sendmultfastretrans);
			}
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			if (asoc->sctp_cmt_on_off > 0) {
				/*
				 * CMT: Using RTX_SSTHRESH policy for CMT.
				 * If CMT is being used, then pick dest with
				 * largest ssthresh for any retransmission.
				 */
				tp1->no_fr_allowed = 1;
				alt = tp1->whoTo;
				/*sa_ignore NO_NULL_CHK*/
				if (asoc->sctp_cmt_pf > 0) {
					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
					alt = sctp_find_alternate_net(stcb, alt, 2);
				} else {
					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
					/*sa_ignore NO_NULL_CHK*/
					alt = sctp_find_alternate_net(stcb, alt, 1);
				}
				if (alt == NULL) {
					alt = tp1->whoTo;
				}
				/*
				 * CUCv2: If a different dest is picked for
				 * the retransmission, then new
				 * (rtx-)pseudo_cumack needs to be tracked
				 * for orig dest. Let CUCv2 track new (rtx-)
				 * pseudo-cumack always.
				 */
				if (tp1->whoTo) {
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
				}
			} else {/* CMT is OFF */
#ifdef SCTP_FR_TO_ALTERNATE
				/* Can we find an alternate? */
				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
				/*
				 * default behavior is to NOT retransmit
				 * FR's to an alternate. Armando Caro's
				 * paper details why.
				 */
				alt = tp1->whoTo;
#endif
			}

			tp1->rec.data.doing_fast_retransmit = 1;
			/* mark the sending seq for possible subsequent FR's */
			/*
			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
			 * (uint32_t)tpi->rec.data.tsn);
			 */
			if (TAILQ_EMPTY(&asoc->send_queue)) {
				/*
				 * If the queue of send is empty then its
				 * the next sequence number that will be
				 * assigned so we subtract one from this to
				 * get the one we last sent.
				 */
				tp1->rec.data.fast_retran_tsn = sending_seq;
			} else {
				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door, we
				 * take the first one (which will have the
				 * lowest TSN) and subtract one to get the
				 * one we last sent.
				 */
				struct sctp_tmit_chunk *ttt;

				ttt = TAILQ_FIRST(&asoc->send_queue);
				tp1->rec.data.fast_retran_tsn =
				    ttt->rec.data.tsn;
			}

			if (tp1->do_rtt) {
				/*
				 * this guy had a RTO calculation pending on
				 * it, cancel it
				 */
				if ((tp1->whoTo != NULL) &&
				    (tp1->whoTo->rto_needed == 0)) {
					tp1->whoTo->rto_needed = 1;
				}
				tp1->do_rtt = 0;
			}
			if (alt != tp1->whoTo) {
				/* yes, there is an alternate. */
				sctp_free_remote_addr(tp1->whoTo);
				/*sa_ignore FREED_MEMORY*/
				tp1->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
}
/*
 * PR-SCTP: try to advance the Advanced Peer Ack Point past abandoned
 * chunks at the head of the sent queue.  Only chunks in the
 * FORWARD_TSN_SKIP, RESEND or NR_ACKED states (and with a PR-SCTP
 * policy enabled) can be skipped; a RESEND chunk is abandoned here only
 * if its TTL has expired.  The first chunk that cannot be skipped stops
 * the scan.
 *
 * Returns the chunk at (or reaching) the new advanced_peer_ack_point,
 * or NULL if PR-SCTP is not supported / no advance was possible; the
 * caller presumably uses it to build a FORWARD-TSN (not visible here).
 */
struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
	struct timeval now;
	int now_filled = 0;

	if (asoc->prsctp_supported == 0) {
		return (NULL);
	}
	/* SAFE variant: sctp_release_pr_sctp_chunk() may unlink tp1. */
	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
		    tp1->sent != SCTP_DATAGRAM_RESEND &&
		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* no chance to advance, out of here */
			break;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    asoc->advanced_peer_ack_point,
				    tp1->rec.data.tsn, 0, 0);
			}
		}
		if (!PR_SCTP_ENABLED(tp1->flags)) {
			/*
			 * We can't fwd-tsn past any that are reliable aka
			 * retransmitted until the asoc fails.
			 */
			break;
		}
		if (!now_filled) {
			/* Fetch the time lazily, once, for the TTL checks below. */
			(void)SCTP_GETTIME_TIMEVAL(&now);
			now_filled = 1;
		}
		/*
		 * now we got a chunk which is marked for another
		 * retransmission to a PR-stream but has run out its chances
		 * already maybe OR has been marked to skip now. Can we skip
		 * it if its a resend?
		 */
		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
			/*
			 * Now is this one marked for resend and its time is
			 * now up?
			 */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
				/* Yes so drop it */
				if (tp1->data) {
					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
					    1, SCTP_SO_NOT_LOCKED);
				}
			} else {
				/*
				 * No, we are done when hit one for resend
				 * whos time as not expired.
				 */
				break;
			}
		}
		/*
		 * Ok now if this chunk is marked to drop it we can clean up
		 * the chunk, advance our peer ack point and we can check
		 * the next chunk.
		 */
		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
			/* advance PeerAckPoint goes forward */
			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
				a_adv = tp1;
			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
				/* No update but we do save the chk */
				a_adv = tp1;
			}
		} else {
			/*
			 * If it is still in RESEND we can advance no
			 * further
			 */
			break;
		}
	}
	return (a_adv);
}
  3727. static int
  3728. sctp_fs_audit(struct sctp_association *asoc)
  3729. {
  3730. struct sctp_tmit_chunk *chk;
  3731. int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
  3732. int ret;
  3733. #ifndef INVARIANTS
  3734. int entry_flight, entry_cnt;
  3735. #endif
  3736. ret = 0;
  3737. #ifndef INVARIANTS
  3738. entry_flight = asoc->total_flight;
  3739. entry_cnt = asoc->total_flight_count;
  3740. #endif
  3741. if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
  3742. return (0);
  3743. TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
  3744. if (chk->sent < SCTP_DATAGRAM_RESEND) {
  3745. SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
  3746. chk->rec.data.tsn,
  3747. chk->send_size,
  3748. chk->snd_count);
  3749. inflight++;
  3750. } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
  3751. resend++;
  3752. } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
  3753. inbetween++;
  3754. } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
  3755. above++;
  3756. } else {
  3757. acked++;
  3758. }
  3759. }
  3760. if ((inflight > 0) || (inbetween > 0)) {
  3761. #ifdef INVARIANTS
  3762. panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
  3763. inflight, inbetween, resend, above, acked);
  3764. #else
  3765. SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
  3766. entry_flight, entry_cnt);
  3767. SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
  3768. inflight, inbetween, resend, above, acked);
  3769. ret = 1;
  3770. #endif
  3771. }
  3772. return (ret);
  3773. }
/*
 * Undo the in-flight accounting for a chunk that was sent as a window
 * probe, once the peer's rwnd has re-opened, and mark the chunk for
 * retransmission so it is sent again through the normal path.  Chunks
 * that are already at/above ACKED, or whose data has been released, are
 * only logged and are not moved back.
 */
static void
sctp_window_probe_recovery(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1)
{
	tp1->window_probe = 0;
	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
		/* TSN's skipped we do NOT move back. */
		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
		    tp1->book_size,
		    (uint32_t)(uintptr_t)tp1->whoTo,
		    tp1->rec.data.tsn);
		return;
	}
	/* First setup this by shrinking flight */
	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
		    tp1);
	}
	sctp_flight_size_decrease(tp1);
	sctp_total_flight_decrease(stcb, tp1);
	/* Now mark for resend */
	tp1->sent = SCTP_DATAGRAM_RESEND;
	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
		    tp1->whoTo->flight_size,
		    tp1->book_size,
		    (uint32_t)(uintptr_t)tp1->whoTo,
		    tp1->rec.data.tsn);
	}
}
/*
 * Express (fast-path) SACK handler: used when the SACK carries no gap-ack
 * blocks, so only the cumulative ack and the advertised receiver window
 * need processing.  Frees cumulatively-acked chunks from the sent queue,
 * updates flight/congestion/RTO state, wakes the sending socket, performs
 * window-probe recovery, drives the shutdown procedures when the queues
 * drain, and advances the PR-SCTP peer-ack point (FORWARD-TSN).  On a
 * protocol violation (the peer acks a TSN we never sent) *abort_now is
 * set to 1 and the association is aborted.
 */
void
sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
    uint32_t rwnd, int *abort_now, int ecne_seen)
{
	struct sctp_nets *net;
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *tp1, *tp2;
	uint32_t old_rwnd;
	int win_probe_recovery = 0;
	int win_probe_recovered = 0;
	int j, done_once = 0;
	int rto_ok = 1;		/* allow at most one RTT sample per SACK */
	uint32_t send_s;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_ASOCLOG_OF_TSNS
	/* Record the cum-ack in the debug ring buffer. */
	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
	stcb->asoc.cumack_log_at++;
	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
		stcb->asoc.cumack_log_at = 0;
	}
#endif
	asoc = &stcb->asoc;
	old_rwnd = asoc->peers_rwnd;
	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
		/* old ack */
		return;
	} else if (asoc->last_acked_seq == cumack) {
		/* Window update sack */
		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		if (asoc->peers_rwnd > old_rwnd) {
			/* Window re-opened: re-check timers/window probes. */
			goto again;
		}
		return;
	}
	/* First setup for CC stuff */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
			/* Drag along the window_tsn for cwr's */
			net->cwr_window_tsn = cumack;
		}
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;
		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
		}
	}
	/* Reality check: the cum-ack must be below the next TSN to send. */
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		tp1 = TAILQ_LAST(&asoc->sent_queue,
		    sctpchunk_listhead);
		send_s = tp1->rec.data.tsn + 1;
	} else {
		send_s = asoc->sending_seq;
	}
	if (SCTP_TSN_GE(cumack, send_s)) {
		/* Peer acked something never sent: protocol violation. */
		struct mbuf *op_err;
		char msg[SCTP_DIAG_INFO_LEN];

		*abort_now = 1;
		/* XXX */
		SCTP_SNPRINTF(msg, sizeof(msg),
		    "Cum ack %8.8x greater or equal than TSN %8.8x",
		    cumack, send_s);
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		return;
	}
	asoc->this_sack_highest_gap = cumack;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INDATA,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
		/* process the new consecutive TSN first */
		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
					SCTP_PRINTF("Warning, an unsent is now acked?\n");
				}
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no-longer in flight. Higher
					 * values may occur during marking
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
							    tp1->whoTo->flight_size,
							    tp1->book_size,
							    (uint32_t)(uintptr_t)tp1->whoTo,
							    tp1->rec.data.tsn);
						}
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
							    tp1);
						}
						/* sa_ignore NO_NULL_CHK */
						sctp_total_flight_decrease(stcb, tp1);
					}
					tp1->whoTo->net_ack += tp1->send_size;
					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
						tp1->whoTo->net_ack2 +=
						    tp1->send_size;
						/* update RTO too? */
						if (tp1->do_rtt) {
							if (rto_ok &&
							    sctp_calculate_rto(stcb,
							        &stcb->asoc,
							        tp1->whoTo,
							        &tp1->sent_rcv_time,
							        SCTP_RTT_FROM_DATA)) {
								rto_ok = 0;
							}
							if (tp1->whoTo->rto_needed == 0) {
								tp1->whoTo->rto_needed = 1;
							}
							tp1->do_rtt = 0;
						}
					}
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresp destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						/* sa_ignore NO_NULL_CHK */
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
					}
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
					} else {
						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
#endif
					}
				}
				/* A drained stream with a pending reset can now proceed. */
				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
					asoc->trigger_reset = 1;
				}
				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
				if (tp1->data) {
					/* sa_ignore NO_NULL_CHK */
					sctp_free_bufspace(stcb, asoc, tp1, 1);
					sctp_m_freem(tp1->data);
					tp1->data = NULL;
				}
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					    cumack,
					    tp1->rec.data.tsn,
					    0,
					    0,
					    SCTP_LOG_FREE_SENT);
				}
				asoc->sent_queue_cnt--;
				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
			} else {
				/* Queue is TSN-ordered: nothing further is cum-acked. */
				break;
			}
		}
	}
#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) &&
			      (sb_free_now >= inp->send_sb_threshold) &&
			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
			     (inp->send_sb_threshold == 0))) {
				/* Hold a ref across the unlocked upcall. */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	} else if (stcb->sctp_socket) {
#else
	/* sa_ignore NO_NULL_CHK */
	if (stcb->sctp_socket) {
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

#endif
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			/* sa_ignore NO_NULL_CHK */
			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Apple lock-order dance: drop TCB lock to take socket lock. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
		}
	}

	/* JRS - Use the congestion control given in the CC module */
	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing error count, this
				 * is optional.
				 */
				net->error_count = 0;
				if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
					/* addr came good */
					net->dest_state |= SCTP_ADDR_REACHABLE;
					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
					    0, (void *)net, SCTP_SO_NOT_LOCKED);
				}
				if (net == stcb->asoc.primary_destination) {
					if (stcb->asoc.alternate) {
						/* release the alternate, primary is good */
						sctp_free_remote_addr(stcb->asoc.alternate);
						stcb->asoc.alternate = NULL;
					}
				}
				if (net->dest_state & SCTP_ADDR_PF) {
					/* Leave potentially-failed state. */
					net->dest_state &= ~SCTP_ADDR_PF;
					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
					    stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
					/* Done with this net */
					net->net_ack = 0;
				}
				/* restore any doubled timers */
				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
				if (net->RTO < stcb->asoc.minrto) {
					net->RTO = stcb->asoc.minrto;
				}
				if (net->RTO > stcb->asoc.maxrto) {
					net->RTO = stcb->asoc.maxrto;
				}
			}
		}
		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
	}
	asoc->last_acked_seq = cumack;

	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}

	/* RWND update */
	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
	/* Now assure a timer where data is queued at */
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			win_probe_recovered = 1;
			/*
			 * Find first chunk that was used with window probe
			 * and clear the sent
			 */
			/* sa_ignore FREED_MEMORY */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					/* move back to data send queue */
					sctp_window_probe_recovery(stcb, asoc, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			j++;
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
			if (net->window_probe) {
				net->window_probe = 0;
			}
		} else {
			if (net->window_probe) {
				/* In window probes we must assure a timer is still running there */
				net->window_probe = 0;
				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
				}
			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, net,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/* huh, this should not happen unless all packets
		 * are PR-SCTP and marked to skip of course.
		 */
		if (sctp_fs_audit(asoc)) {
			/* Accounting drifted: rebuild flight totals from scratch. */
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				net->flight_size = 0;
			}
			asoc->total_flight = 0;
			asoc->total_flight_count = 0;
			asoc->sent_queue_retran_cnt = 0;
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
				}
			}
		}
		done_once = 1;
		goto again;
	}
	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
		}
		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->stream_queue_cnt == 1) &&
		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
			struct mbuf *op_err;

			*abort_now = 1;
			/* XXX */
			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return;
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			}
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
			    stcb->sctp_ep, stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, NULL);
		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown_ack(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, netp);
		}
	}
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
		asoc->advanced_peer_ack_point = cumack;
	}
	/* PR-Sctp issues need to be addressed too */
	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;
		uint32_t old_adv_peer_ack_point;

		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing.
			 */
			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
				send_forward_tsn(stcb, asoc);
			} else if (lchk) {
				/* try to FR fwd-tsn's that get lost too */
				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
					send_forward_tsn(stcb, asoc);
				}
			}
		}
		/* Skip chunks with no destination before arming the timer. */
		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
			if (lchk->whoTo != NULL) {
				break;
			}
		}
		if (lchk != NULL) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
			    stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
		    rwnd,
		    stcb->asoc.peers_rwnd,
		    stcb->asoc.total_flight,
		    stcb->asoc.total_output_queue_size);
	}
}
  4307. void
  4308. sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
  4309. struct sctp_tcb *stcb,
  4310. uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
  4311. int *abort_now, uint8_t flags,
  4312. uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
  4313. {
  4314. struct sctp_association *asoc;
  4315. struct sctp_tmit_chunk *tp1, *tp2;
  4316. uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
  4317. uint16_t wake_him = 0;
  4318. uint32_t send_s = 0;
  4319. long j;
  4320. int accum_moved = 0;
  4321. int will_exit_fast_recovery = 0;
  4322. uint32_t a_rwnd, old_rwnd;
  4323. int win_probe_recovery = 0;
  4324. int win_probe_recovered = 0;
  4325. struct sctp_nets *net = NULL;
  4326. int done_once;
  4327. int rto_ok = 1;
  4328. uint8_t reneged_all = 0;
  4329. uint8_t cmt_dac_flag;
  4330. /*
  4331. * we take any chance we can to service our queues since we cannot
  4332. * get awoken when the socket is read from :<
  4333. */
  4334. /*
  4335. * Now perform the actual SACK handling: 1) Verify that it is not an
  4336. * old sack, if so discard. 2) If there is nothing left in the send
  4337. * queue (cum-ack is equal to last acked) then you have a duplicate
  4338. * too, update any rwnd change and verify no timers are running.
  4339. * then return. 3) Process any new consecutive data i.e. cum-ack
  4340. * moved process these first and note that it moved. 4) Process any
  4341. * sack blocks. 5) Drop any acked from the queue. 6) Check for any
  4342. * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
  4343. * sync up flightsizes and things, stop all timers and also check
  4344. * for shutdown_pending state. If so then go ahead and send off the
  4345. * shutdown. If in shutdown recv, send off the shutdown-ack and
  4346. * start that timer, Ret. 9) Strike any non-acked things and do FR
  4347. * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
  4348. * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
  4349. * if in shutdown_recv state.
  4350. */
  4351. SCTP_TCB_LOCK_ASSERT(stcb);
  4352. /* CMT DAC algo */
  4353. this_sack_lowest_newack = 0;
  4354. SCTP_STAT_INCR(sctps_slowpath_sack);
  4355. last_tsn = cum_ack;
  4356. cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
  4357. #ifdef SCTP_ASOCLOG_OF_TSNS
  4358. stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
  4359. stcb->asoc.cumack_log_at++;
  4360. if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
  4361. stcb->asoc.cumack_log_at = 0;
  4362. }
  4363. #endif
  4364. a_rwnd = rwnd;
  4365. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
  4366. sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
  4367. rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
  4368. }
  4369. old_rwnd = stcb->asoc.peers_rwnd;
  4370. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
  4371. sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
  4372. stcb->asoc.overall_error_count,
  4373. 0,
  4374. SCTP_FROM_SCTP_INDATA,
  4375. __LINE__);
  4376. }
  4377. stcb->asoc.overall_error_count = 0;
  4378. asoc = &stcb->asoc;
  4379. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
  4380. sctp_log_sack(asoc->last_acked_seq,
  4381. cum_ack,
  4382. 0,
  4383. num_seg,
  4384. num_dup,
  4385. SCTP_LOG_NEW_SACK);
  4386. }
  4387. if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
  4388. uint16_t i;
  4389. uint32_t *dupdata, dblock;
  4390. for (i = 0; i < num_dup; i++) {
  4391. dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
  4392. sizeof(uint32_t), (uint8_t *)&dblock);
  4393. if (dupdata == NULL) {
  4394. break;
  4395. }
  4396. sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
  4397. }
  4398. }
  4399. /* reality check */
  4400. if (!TAILQ_EMPTY(&asoc->sent_queue)) {
  4401. tp1 = TAILQ_LAST(&asoc->sent_queue,
  4402. sctpchunk_listhead);
  4403. send_s = tp1->rec.data.tsn + 1;
  4404. } else {
  4405. tp1 = NULL;
  4406. send_s = asoc->sending_seq;
  4407. }
  4408. if (SCTP_TSN_GE(cum_ack, send_s)) {
  4409. struct mbuf *op_err;
  4410. char msg[SCTP_DIAG_INFO_LEN];
  4411. /*
  4412. * no way, we have not even sent this TSN out yet.
  4413. * Peer is hopelessly messed up with us.
  4414. */
  4415. SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
  4416. cum_ack, send_s);
  4417. if (tp1) {
  4418. SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
  4419. tp1->rec.data.tsn, (void *)tp1);
  4420. }
  4421. hopeless_peer:
  4422. *abort_now = 1;
  4423. /* XXX */
  4424. SCTP_SNPRINTF(msg, sizeof(msg),
  4425. "Cum ack %8.8x greater or equal than TSN %8.8x",
  4426. cum_ack, send_s);
  4427. op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
  4428. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
  4429. sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  4430. return;
  4431. }
  4432. /**********************/
  4433. /* 1) check the range */
  4434. /**********************/
  4435. if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
  4436. /* acking something behind */
  4437. return;
  4438. }
  4439. /* update the Rwnd of the peer */
  4440. if (TAILQ_EMPTY(&asoc->sent_queue) &&
  4441. TAILQ_EMPTY(&asoc->send_queue) &&
  4442. (asoc->stream_queue_cnt == 0)) {
  4443. /* nothing left on send/sent and strmq */
  4444. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
  4445. sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
  4446. asoc->peers_rwnd, 0, 0, a_rwnd);
  4447. }
  4448. asoc->peers_rwnd = a_rwnd;
  4449. if (asoc->sent_queue_retran_cnt) {
  4450. asoc->sent_queue_retran_cnt = 0;
  4451. }
  4452. if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
  4453. /* SWS sender side engages */
  4454. asoc->peers_rwnd = 0;
  4455. }
  4456. /* stop any timers */
  4457. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4458. sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  4459. stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
  4460. net->partial_bytes_acked = 0;
  4461. net->flight_size = 0;
  4462. }
  4463. asoc->total_flight = 0;
  4464. asoc->total_flight_count = 0;
  4465. return;
  4466. }
  4467. /*
  4468. * We init netAckSz and netAckSz2 to 0. These are used to track 2
  4469. * things. The total byte count acked is tracked in netAckSz AND
  4470. * netAck2 is used to track the total bytes acked that are un-
  4471. * ambiguous and were never retransmitted. We track these on a per
  4472. * destination address basis.
  4473. */
  4474. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4475. if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
  4476. /* Drag along the window_tsn for cwr's */
  4477. net->cwr_window_tsn = cum_ack;
  4478. }
  4479. net->prev_cwnd = net->cwnd;
  4480. net->net_ack = 0;
  4481. net->net_ack2 = 0;
  4482. /*
  4483. * CMT: Reset CUC and Fast recovery algo variables before
  4484. * SACK processing
  4485. */
  4486. net->new_pseudo_cumack = 0;
  4487. net->will_exit_fast_recovery = 0;
  4488. if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
  4489. (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
  4490. }
  4491. /*
  4492. * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
  4493. * to be greater than the cumack. Also reset saw_newack to 0
  4494. * for all dests.
  4495. */
  4496. net->saw_newack = 0;
  4497. net->this_sack_highest_newack = last_tsn;
  4498. }
  4499. /* process the new consecutive TSN first */
  4500. TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
  4501. if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
  4502. if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
  4503. accum_moved = 1;
  4504. if (tp1->sent < SCTP_DATAGRAM_ACKED) {
  4505. /*
  4506. * If it is less than ACKED, it is
  4507. * now no-longer in flight. Higher
  4508. * values may occur during marking
  4509. */
  4510. if ((tp1->whoTo->dest_state &
  4511. SCTP_ADDR_UNCONFIRMED) &&
  4512. (tp1->snd_count < 2)) {
  4513. /*
  4514. * If there was no retran
  4515. * and the address is
  4516. * un-confirmed and we sent
  4517. * there and are now
  4518. * sacked.. its confirmed,
  4519. * mark it so.
  4520. */
  4521. tp1->whoTo->dest_state &=
  4522. ~SCTP_ADDR_UNCONFIRMED;
  4523. }
  4524. if (tp1->sent < SCTP_DATAGRAM_RESEND) {
  4525. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
  4526. sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
  4527. tp1->whoTo->flight_size,
  4528. tp1->book_size,
  4529. (uint32_t)(uintptr_t)tp1->whoTo,
  4530. tp1->rec.data.tsn);
  4531. }
  4532. sctp_flight_size_decrease(tp1);
  4533. sctp_total_flight_decrease(stcb, tp1);
  4534. if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
  4535. (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
  4536. tp1);
  4537. }
  4538. }
  4539. tp1->whoTo->net_ack += tp1->send_size;
  4540. /* CMT SFR and DAC algos */
  4541. this_sack_lowest_newack = tp1->rec.data.tsn;
  4542. tp1->whoTo->saw_newack = 1;
  4543. if (tp1->snd_count < 2) {
  4544. /*
  4545. * True non-retransmitted
  4546. * chunk
  4547. */
  4548. tp1->whoTo->net_ack2 +=
  4549. tp1->send_size;
  4550. /* update RTO too? */
  4551. if (tp1->do_rtt) {
  4552. if (rto_ok &&
  4553. sctp_calculate_rto(stcb,
  4554. &stcb->asoc,
  4555. tp1->whoTo,
  4556. &tp1->sent_rcv_time,
  4557. SCTP_RTT_FROM_DATA)) {
  4558. rto_ok = 0;
  4559. }
  4560. if (tp1->whoTo->rto_needed == 0) {
  4561. tp1->whoTo->rto_needed = 1;
  4562. }
  4563. tp1->do_rtt = 0;
  4564. }
  4565. }
  4566. /*
  4567. * CMT: CUCv2 algorithm. From the
  4568. * cumack'd TSNs, for each TSN being
  4569. * acked for the first time, set the
  4570. * following variables for the
  4571. * corresp destination.
  4572. * new_pseudo_cumack will trigger a
  4573. * cwnd update.
  4574. * find_(rtx_)pseudo_cumack will
  4575. * trigger search for the next
  4576. * expected (rtx-)pseudo-cumack.
  4577. */
  4578. tp1->whoTo->new_pseudo_cumack = 1;
  4579. tp1->whoTo->find_pseudo_cumack = 1;
  4580. tp1->whoTo->find_rtx_pseudo_cumack = 1;
  4581. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
  4582. sctp_log_sack(asoc->last_acked_seq,
  4583. cum_ack,
  4584. tp1->rec.data.tsn,
  4585. 0,
  4586. 0,
  4587. SCTP_LOG_TSN_ACKED);
  4588. }
  4589. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
  4590. sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
  4591. }
  4592. }
  4593. if (tp1->sent == SCTP_DATAGRAM_RESEND) {
  4594. sctp_ucount_decr(asoc->sent_queue_retran_cnt);
  4595. #ifdef SCTP_AUDITING_ENABLED
  4596. sctp_audit_log(0xB3,
  4597. (asoc->sent_queue_retran_cnt & 0x000000ff));
  4598. #endif
  4599. }
  4600. if (tp1->rec.data.chunk_was_revoked) {
  4601. /* deflate the cwnd */
  4602. tp1->whoTo->cwnd -= tp1->book_size;
  4603. tp1->rec.data.chunk_was_revoked = 0;
  4604. }
  4605. if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
  4606. tp1->sent = SCTP_DATAGRAM_ACKED;
  4607. }
  4608. }
  4609. } else {
  4610. break;
  4611. }
  4612. }
  4613. biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
  4614. /* always set this up to cum-ack */
  4615. asoc->this_sack_highest_gap = last_tsn;
  4616. if ((num_seg > 0) || (num_nr_seg > 0)) {
  4617. /*
  4618. * thisSackHighestGap will increase while handling NEW
  4619. * segments this_sack_highest_newack will increase while
  4620. * handling NEWLY ACKED chunks. this_sack_lowest_newack is
  4621. * used for CMT DAC algo. saw_newack will also change.
  4622. */
  4623. if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
  4624. &biggest_tsn_newly_acked, &this_sack_lowest_newack,
  4625. num_seg, num_nr_seg, &rto_ok)) {
  4626. wake_him++;
  4627. }
  4628. /*
  4629. * validate the biggest_tsn_acked in the gap acks if
  4630. * strict adherence is wanted.
  4631. */
  4632. if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
  4633. /*
  4634. * peer is either confused or we are under
  4635. * attack. We must abort.
  4636. */
  4637. SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
  4638. biggest_tsn_acked, send_s);
  4639. goto hopeless_peer;
  4640. }
  4641. }
  4642. /*******************************************/
  4643. /* cancel ALL T3-send timer if accum moved */
  4644. /*******************************************/
  4645. if (asoc->sctp_cmt_on_off > 0) {
  4646. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4647. if (net->new_pseudo_cumack)
  4648. sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  4649. stcb, net,
  4650. SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
  4651. }
  4652. } else {
  4653. if (accum_moved) {
  4654. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4655. sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  4656. stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
  4657. }
  4658. }
  4659. }
  4660. /********************************************/
  4661. /* drop the acked chunks from the sentqueue */
  4662. /********************************************/
  4663. asoc->last_acked_seq = cum_ack;
  4664. TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
  4665. if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
  4666. break;
  4667. }
  4668. if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
  4669. if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
  4670. asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
  4671. #ifdef INVARIANTS
  4672. } else {
  4673. panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
  4674. #endif
  4675. }
  4676. }
  4677. if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
  4678. (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
  4679. TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
  4680. asoc->trigger_reset = 1;
  4681. }
  4682. TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
  4683. if (PR_SCTP_ENABLED(tp1->flags)) {
  4684. if (asoc->pr_sctp_cnt != 0)
  4685. asoc->pr_sctp_cnt--;
  4686. }
  4687. asoc->sent_queue_cnt--;
  4688. if (tp1->data) {
  4689. /* sa_ignore NO_NULL_CHK */
  4690. sctp_free_bufspace(stcb, asoc, tp1, 1);
  4691. sctp_m_freem(tp1->data);
  4692. tp1->data = NULL;
  4693. if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
  4694. asoc->sent_queue_cnt_removeable--;
  4695. }
  4696. }
  4697. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
  4698. sctp_log_sack(asoc->last_acked_seq,
  4699. cum_ack,
  4700. tp1->rec.data.tsn,
  4701. 0,
  4702. 0,
  4703. SCTP_LOG_FREE_SENT);
  4704. }
  4705. sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
  4706. wake_him++;
  4707. }
  4708. if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
  4709. #ifdef INVARIANTS
  4710. panic("Warning flight size is positive and should be 0");
  4711. #else
  4712. SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
  4713. asoc->total_flight);
  4714. #endif
  4715. asoc->total_flight = 0;
  4716. }
  4717. #if defined(__Userspace__)
  4718. if (stcb->sctp_ep->recv_callback) {
  4719. if (stcb->sctp_socket) {
  4720. uint32_t inqueue_bytes, sb_free_now;
  4721. struct sctp_inpcb *inp;
  4722. inp = stcb->sctp_ep;
  4723. inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
  4724. sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
  4725. /* check if the amount free in the send socket buffer crossed the threshold */
  4726. if (inp->send_callback &&
  4727. (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
  4728. (inp->send_sb_threshold == 0))) {
  4729. atomic_add_int(&stcb->asoc.refcnt, 1);
  4730. SCTP_TCB_UNLOCK(stcb);
  4731. inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
  4732. SCTP_TCB_LOCK(stcb);
  4733. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4734. }
  4735. }
  4736. } else if ((wake_him) && (stcb->sctp_socket)) {
  4737. #else
  4738. /* sa_ignore NO_NULL_CHK */
  4739. if ((wake_him) && (stcb->sctp_socket)) {
  4740. #endif
  4741. #if defined(__APPLE__) && !defined(__Userspace__)
  4742. struct socket *so;
  4743. #endif
  4744. SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
  4745. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
  4746. sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
  4747. }
  4748. #if defined(__APPLE__) && !defined(__Userspace__)
  4749. so = SCTP_INP_SO(stcb->sctp_ep);
  4750. atomic_add_int(&stcb->asoc.refcnt, 1);
  4751. SCTP_TCB_UNLOCK(stcb);
  4752. SCTP_SOCKET_LOCK(so, 1);
  4753. SCTP_TCB_LOCK(stcb);
  4754. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4755. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  4756. /* assoc was freed while we were unlocked */
  4757. SCTP_SOCKET_UNLOCK(so, 1);
  4758. return;
  4759. }
  4760. #endif
  4761. sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
  4762. #if defined(__APPLE__) && !defined(__Userspace__)
  4763. SCTP_SOCKET_UNLOCK(so, 1);
  4764. #endif
  4765. } else {
  4766. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
  4767. sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
  4768. }
  4769. }
  4770. if (asoc->fast_retran_loss_recovery && accum_moved) {
  4771. if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
  4772. /* Setup so we will exit RFC2582 fast recovery */
  4773. will_exit_fast_recovery = 1;
  4774. }
  4775. }
  4776. /*
  4777. * Check for revoked fragments:
  4778. *
  4779. * if Previous sack - Had no frags then we can't have any revoked if
  4780. * Previous sack - Had frag's then - If we now have frags aka
  4781. * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
  4782. * some of them. else - The peer revoked all ACKED fragments, since
  4783. * we had some before and now we have NONE.
  4784. */
  4785. if (num_seg) {
  4786. sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
  4787. asoc->saw_sack_with_frags = 1;
  4788. } else if (asoc->saw_sack_with_frags) {
  4789. int cnt_revoked = 0;
  4790. /* Peer revoked all dg's marked or acked */
  4791. TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
  4792. if (tp1->sent == SCTP_DATAGRAM_ACKED) {
  4793. tp1->sent = SCTP_DATAGRAM_SENT;
  4794. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
  4795. sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
  4796. tp1->whoTo->flight_size,
  4797. tp1->book_size,
  4798. (uint32_t)(uintptr_t)tp1->whoTo,
  4799. tp1->rec.data.tsn);
  4800. }
  4801. sctp_flight_size_increase(tp1);
  4802. sctp_total_flight_increase(stcb, tp1);
  4803. tp1->rec.data.chunk_was_revoked = 1;
  4804. /*
  4805. * To ensure that this increase in
  4806. * flightsize, which is artificial,
  4807. * does not throttle the sender, we
  4808. * also increase the cwnd
  4809. * artificially.
  4810. */
  4811. tp1->whoTo->cwnd += tp1->book_size;
  4812. cnt_revoked++;
  4813. }
  4814. }
  4815. if (cnt_revoked) {
  4816. reneged_all = 1;
  4817. }
  4818. asoc->saw_sack_with_frags = 0;
  4819. }
  4820. if (num_nr_seg > 0)
  4821. asoc->saw_sack_with_nr_frags = 1;
  4822. else
  4823. asoc->saw_sack_with_nr_frags = 0;
  4824. /* JRS - Use the congestion control given in the CC module */
  4825. if (ecne_seen == 0) {
  4826. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4827. if (net->net_ack2 > 0) {
  4828. /*
  4829. * Karn's rule applies to clearing error count, this
  4830. * is optional.
  4831. */
  4832. net->error_count = 0;
  4833. if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
  4834. /* addr came good */
  4835. net->dest_state |= SCTP_ADDR_REACHABLE;
  4836. sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
  4837. 0, (void *)net, SCTP_SO_NOT_LOCKED);
  4838. }
  4839. if (net == stcb->asoc.primary_destination) {
  4840. if (stcb->asoc.alternate) {
  4841. /* release the alternate, primary is good */
  4842. sctp_free_remote_addr(stcb->asoc.alternate);
  4843. stcb->asoc.alternate = NULL;
  4844. }
  4845. }
  4846. if (net->dest_state & SCTP_ADDR_PF) {
  4847. net->dest_state &= ~SCTP_ADDR_PF;
  4848. sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
  4849. stcb->sctp_ep, stcb, net,
  4850. SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
  4851. sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
  4852. asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
  4853. /* Done with this net */
  4854. net->net_ack = 0;
  4855. }
  4856. /* restore any doubled timers */
  4857. net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
  4858. if (net->RTO < stcb->asoc.minrto) {
  4859. net->RTO = stcb->asoc.minrto;
  4860. }
  4861. if (net->RTO > stcb->asoc.maxrto) {
  4862. net->RTO = stcb->asoc.maxrto;
  4863. }
  4864. }
  4865. }
  4866. asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
  4867. }
  4868. if (TAILQ_EMPTY(&asoc->sent_queue)) {
  4869. /* nothing left in-flight */
  4870. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4871. /* stop all timers */
  4872. sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  4873. stcb, net,
  4874. SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
  4875. net->flight_size = 0;
  4876. net->partial_bytes_acked = 0;
  4877. }
  4878. asoc->total_flight = 0;
  4879. asoc->total_flight_count = 0;
  4880. }
  4881. /**********************************/
  4882. /* Now what about shutdown issues */
  4883. /**********************************/
  4884. if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
  4885. /* nothing left on sendqueue.. consider done */
  4886. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
  4887. sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
  4888. asoc->peers_rwnd, 0, 0, a_rwnd);
  4889. }
  4890. asoc->peers_rwnd = a_rwnd;
  4891. if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
  4892. /* SWS sender side engages */
  4893. asoc->peers_rwnd = 0;
  4894. }
  4895. /* clean up */
  4896. if ((asoc->stream_queue_cnt == 1) &&
  4897. ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
  4898. (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
  4899. ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
  4900. SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
  4901. }
  4902. if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
  4903. (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
  4904. (asoc->stream_queue_cnt == 1) &&
  4905. (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
  4906. struct mbuf *op_err;
  4907. *abort_now = 1;
  4908. /* XXX */
  4909. op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
  4910. stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
  4911. sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
  4912. return;
  4913. }
  4914. if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
  4915. (asoc->stream_queue_cnt == 0)) {
  4916. struct sctp_nets *netp;
  4917. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
  4918. (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
  4919. SCTP_STAT_DECR_GAUGE32(sctps_currestab);
  4920. }
  4921. SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
  4922. sctp_stop_timers_for_shutdown(stcb);
  4923. if (asoc->alternate) {
  4924. netp = asoc->alternate;
  4925. } else {
  4926. netp = asoc->primary_destination;
  4927. }
  4928. sctp_send_shutdown(stcb, netp);
  4929. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
  4930. stcb->sctp_ep, stcb, netp);
  4931. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
  4932. stcb->sctp_ep, stcb, NULL);
  4933. return;
  4934. } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
  4935. (asoc->stream_queue_cnt == 0)) {
  4936. struct sctp_nets *netp;
  4937. SCTP_STAT_DECR_GAUGE32(sctps_currestab);
  4938. SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
  4939. sctp_stop_timers_for_shutdown(stcb);
  4940. if (asoc->alternate) {
  4941. netp = asoc->alternate;
  4942. } else {
  4943. netp = asoc->primary_destination;
  4944. }
  4945. sctp_send_shutdown_ack(stcb, netp);
  4946. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
  4947. stcb->sctp_ep, stcb, netp);
  4948. return;
  4949. }
  4950. }
  4951. /*
  4952. * Now here we are going to recycle net_ack for a different use...
  4953. * HEADS UP.
  4954. */
  4955. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4956. net->net_ack = 0;
  4957. }
  4958. /*
  4959. * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
  4960. * to be done. Setting this_sack_lowest_newack to the cum_ack will
  4961. * automatically ensure that.
  4962. */
  4963. if ((asoc->sctp_cmt_on_off > 0) &&
  4964. SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
  4965. (cmt_dac_flag == 0)) {
  4966. this_sack_lowest_newack = cum_ack;
  4967. }
  4968. if ((num_seg > 0) || (num_nr_seg > 0)) {
  4969. sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
  4970. biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
  4971. }
  4972. /* JRS - Use the congestion control given in the CC module */
  4973. asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
  4974. /* Now are we exiting loss recovery ? */
  4975. if (will_exit_fast_recovery) {
  4976. /* Ok, we must exit fast recovery */
  4977. asoc->fast_retran_loss_recovery = 0;
  4978. }
  4979. if ((asoc->sat_t3_loss_recovery) &&
  4980. SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
  4981. /* end satellite t3 loss recovery */
  4982. asoc->sat_t3_loss_recovery = 0;
  4983. }
  4984. /*
  4985. * CMT Fast recovery
  4986. */
  4987. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  4988. if (net->will_exit_fast_recovery) {
  4989. /* Ok, we must exit fast recovery */
  4990. net->fast_retran_loss_recovery = 0;
  4991. }
  4992. }
  4993. /* Adjust and set the new rwnd value */
  4994. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
  4995. sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
  4996. asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
  4997. }
  4998. asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
  4999. (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
  5000. if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
  5001. /* SWS sender side engages */
  5002. asoc->peers_rwnd = 0;
  5003. }
  5004. if (asoc->peers_rwnd > old_rwnd) {
  5005. win_probe_recovery = 1;
  5006. }
  5007. /*
  5008. * Now we must setup so we have a timer up for anyone with
  5009. * outstanding data.
  5010. */
  5011. done_once = 0;
  5012. again:
  5013. j = 0;
  5014. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  5015. if (win_probe_recovery && (net->window_probe)) {
  5016. win_probe_recovered = 1;
  5017. /*-
  5018. * Find first chunk that was used with
  5019. * window probe and clear the event. Put
  5020. * it back into the send queue as if has
  5021. * not been sent.
  5022. */
  5023. TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
  5024. if (tp1->window_probe) {
  5025. sctp_window_probe_recovery(stcb, asoc, tp1);
  5026. break;
  5027. }
  5028. }
  5029. }
  5030. if (net->flight_size) {
  5031. j++;
  5032. if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
  5033. sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  5034. stcb->sctp_ep, stcb, net);
  5035. }
  5036. if (net->window_probe) {
  5037. net->window_probe = 0;
  5038. }
  5039. } else {
  5040. if (net->window_probe) {
  5041. /* In window probes we must assure a timer is still running there */
  5042. if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
  5043. sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  5044. stcb->sctp_ep, stcb, net);
  5045. }
  5046. } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
  5047. sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  5048. stcb, net,
  5049. SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
  5050. }
  5051. }
  5052. }
  5053. if ((j == 0) &&
  5054. (!TAILQ_EMPTY(&asoc->sent_queue)) &&
  5055. (asoc->sent_queue_retran_cnt == 0) &&
  5056. (win_probe_recovered == 0) &&
  5057. (done_once == 0)) {
  5058. /* huh, this should not happen unless all packets
  5059. * are PR-SCTP and marked to skip of course.
  5060. */
  5061. if (sctp_fs_audit(asoc)) {
  5062. TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
  5063. net->flight_size = 0;
  5064. }
  5065. asoc->total_flight = 0;
  5066. asoc->total_flight_count = 0;
  5067. asoc->sent_queue_retran_cnt = 0;
  5068. TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
  5069. if (tp1->sent < SCTP_DATAGRAM_RESEND) {
  5070. sctp_flight_size_increase(tp1);
  5071. sctp_total_flight_increase(stcb, tp1);
  5072. } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
  5073. sctp_ucount_incr(asoc->sent_queue_retran_cnt);
  5074. }
  5075. }
  5076. }
  5077. done_once = 1;
  5078. goto again;
  5079. }
  5080. /*********************************************/
  5081. /* Here we perform PR-SCTP procedures */
  5082. /* (section 4.2) */
  5083. /*********************************************/
  5084. /* C1. update advancedPeerAckPoint */
  5085. if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
  5086. asoc->advanced_peer_ack_point = cum_ack;
  5087. }
  5088. /* C2. try to further move advancedPeerAckPoint ahead */
  5089. if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
  5090. struct sctp_tmit_chunk *lchk;
  5091. uint32_t old_adv_peer_ack_point;
  5092. old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
  5093. lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
  5094. /* C3. See if we need to send a Fwd-TSN */
  5095. if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
  5096. /*
  5097. * ISSUE with ECN, see FWD-TSN processing.
  5098. */
  5099. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
  5100. sctp_misc_ints(SCTP_FWD_TSN_CHECK,
  5101. 0xee, cum_ack, asoc->advanced_peer_ack_point,
  5102. old_adv_peer_ack_point);
  5103. }
  5104. if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
  5105. send_forward_tsn(stcb, asoc);
  5106. } else if (lchk) {
  5107. /* try to FR fwd-tsn's that get lost too */
  5108. if (lchk->rec.data.fwd_tsn_cnt >= 3) {
  5109. send_forward_tsn(stcb, asoc);
  5110. }
  5111. }
  5112. }
  5113. for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
  5114. if (lchk->whoTo != NULL) {
  5115. break;
  5116. }
  5117. }
  5118. if (lchk != NULL) {
  5119. /* Assure a timer is up */
  5120. sctp_timer_start(SCTP_TIMER_TYPE_SEND,
  5121. stcb->sctp_ep, stcb, lchk->whoTo);
  5122. }
  5123. }
  5124. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
  5125. sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
  5126. a_rwnd,
  5127. stcb->asoc.peers_rwnd,
  5128. stcb->asoc.total_flight,
  5129. stcb->asoc.total_output_queue_size);
  5130. }
  5131. }
  5132. void
  5133. sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
  5134. {
  5135. /* Copy cum-ack */
  5136. uint32_t cum_ack, a_rwnd;
  5137. cum_ack = ntohl(cp->cumulative_tsn_ack);
  5138. /* Arrange so a_rwnd does NOT change */
  5139. a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
  5140. /* Now call the express sack handling */
  5141. sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
  5142. }
/*
 * After a FORWARD-TSN has advanced the cumulative TSN, release any ordered
 * messages on stream 'strmin' that have become deliverable.  Two passes:
 * first deliver everything with a MID at or below the stream's current
 * last_mid_delivered, then continue delivering any consecutive run of MIDs
 * starting just above it.  Runs with the read lock held
 * (SCTP_READ_LOCK_HELD is passed to the helpers).
 */
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *control, *ncontrol;
	struct sctp_association *asoc;
	uint32_t mid;
	int need_reasm_check = 0;

	asoc = &stcb->asoc;
	mid = strmin->last_mid_delivered;
	/*
	 * First deliver anything prior to and including the stream no that
	 * came in.
	 */
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
			/* this is deliverable now */
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * Complete (unfragmented) message: unlink it
				 * from whichever stream queue it sits on.
				 */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					/* Clamp instead of underflowing in production builds. */
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv,
					    1, SCTP_READ_LOCK_HELD,
					    SCTP_SO_NOT_LOCKED);
				}
			} else {
				/* Its a fragmented message */
				if (control->first_frag_seen) {
					/*
					 * Make it so this is next to deliver, we restore later.
					 * Rewinding last_mid_delivered lets the reassembly
					 * check below pick this partial message up first.
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			/* no more delivery now. */
			break;
		}
	}
	if (need_reasm_check) {
		int ret;

		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
			/* Restore the next to deliver unless we are ahead */
			strmin->last_mid_delivered = mid;
		}
		if (ret == 0) {
			/* Left the front Partial one on */
			return;
		}
		need_reasm_check = 0;
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					/* Clamp instead of underflowing in production builds. */
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				/* Advance to the next expected MID in the run. */
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* Its a fragmented message */
				if (control->first_frag_seen) {
					/* Make it so this is next to deliver */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}
/*
 * Purge the reassembly queue of 'control' after a FORWARD-TSN advanced the
 * cumulative TSN to 'cumtsn'.  For old-style (non I-DATA) unordered data
 * only fragments with a TSN at or below cumtsn are dropped; otherwise the
 * whole queue is flushed.  If nothing survives, 'control' itself is removed
 * from its stream queue and freed unless it is already on the read queue.
 * 'ordered' is nonzero for ordered data.  Called with the read lock held
 * (SCTP_READ_LOCK_HELD is passed to the helpers).
 */
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc, struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/*
	 * For now large messages held on the stream reasm that are
	 * complete will be tossed too. We could in theory do more
	 * work to spin through and stop after dumping one msg aka
	 * seeing the start of a new msg at the head, and call the
	 * delivery function... to see if it can be delivered... But
	 * for now we just dump everything on the queue.
	 */
	if (!asoc->idata_supported && !ordered &&
	    control->first_frag_seen &&
	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		/*
		 * Old-style unordered message whose included fragments all
		 * lie beyond the new cumulative TSN: nothing is obsolete.
		 */
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && !ordered) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				/* Fragments past cumtsn are still live; stop. */
				break;
			}
		}
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		/* Account for the bytes leaving the reassembly queue. */
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			/* Clamp instead of underflowing in production builds. */
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		/* Re-arm 'control' so it can collect the surviving fragments. */
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * The head fragment starts a new message; fold it
			 * into 'control' so delivery can resume from it.
			 */
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	/* Everything was purged: unlink 'control' from its stream queue. */
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			/* Clamp instead of underflowing in production builds. */
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	/* Safety net: clears unknown queue states in non-INVARIANTS builds. */
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		/* Not visible to the application; free it outright. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required in by pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *ncontrol, *sv;

	asoc = &stcb->asoc;
	/* Chunk must at least hold the fixed header + new cumulative TSN. */
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	/* Capacity of the mapping array in TSNs (bytes << 3 == bits). */
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		/* New cum TSN lies beyond what the mapping array can track. */
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			SCTP_SNPRINTF(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
		/*
		 * Everything tracked so far is now below the new cum TSN:
		 * reset both maps and rebase them just past new_cum_tsn.
		 */
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;
		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * Mark every TSN up to and including the gap as received
		 * (in the nr-map) if it was not already present in either
		 * map, tracking the new highest nr-mapped TSN as we go.
		 */
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/
	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			strm = &asoc->strmin[sid];
			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
			}
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		/*
		 * The trailing list of per-stream entries uses a wider
		 * (sid, flags, 32-bit mid) layout when I-DATA is in use.
		 */
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					/* mbuf chain ran out of data. */
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					/* mbuf chain ran out of data. */
					break;
				}
				sid = ntohs(stseq->sid);
				/* Classic DATA: 16-bit SSN widened to a mid. */
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/* Convert */
			/* now process */
			/*
			 * Ok we now look for the stream/seq on the read queue
			 * where its not all delivered. If we find it we transmute the
			 * read entry into a PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially delivering
				 * now then we no longer are. Note this will change
				 * with the reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
			strm = &asoc->strmin[sid];
			/*
			 * Purge every queued message on this stream whose
			 * mid is covered (<=) by the skipped mid.
			 */
			if (ordered) {
				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
					}
				}
			} else {
				if (asoc->idata_supported) {
					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
						}
					}
				} else {
					/* Without I-DATA, unordered has no mid to compare. */
					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
					}
				}
			}
			/*
			 * If a partially delivered message for this (sid, mid)
			 * sits on the socket read queue, mark it aborted and
			 * ended, unhook it from the stream queue, and notify
			 * the ULP that the partial delivery was aborted.
			 */
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					/* Packed stream/seq handed to the notification. */
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							/* Clamp rather than underflow the counter. */
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					/*
					 * Point control_pdapi at this entry for the
					 * duration of the upcall, then restore the
					 * saved value.
					 */
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide thing forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}