sctp_input.c

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#endif

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <netinet/sctp_kdtrace.h>
#endif
#if defined(INET) || defined(INET6)
#if !defined(_WIN32)
#include <netinet/udp.h>
#endif
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/smp.h>
#endif
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This stops not only all COOKIE timers but also any INIT timers,
	 * which makes sure the timers are stopped in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}
/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    (void *)stcb);
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
	}
	/* Validate parameters */
	init = &cp->init;
	if (ntohl(init->initiate_tag) == 0) {
		goto outnow;
	}
	if ((ntohl(init->a_rwnd) < SCTP_MIN_RWND) ||
	    (ntohs(init->num_inbound_streams) == 0) ||
	    (ntohs(init->num_outbound_streams) == 0)) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid, inp->fibnum,
#endif
		    vrf_id, port);
		goto outnow;
	}
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with AUTH parameters");
		sctp_send_abort(m, iphlen, src, dst, sh, init->initiate_tag, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid, inp->fibnum,
#endif
		    vrf_id, port);
		goto outnow;
	}
	/* We are only accepting if we have a listening socket. */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	     (!SCTP_IS_LISTENING(inp)))) {
		/*
		 * FIX ME ?? What about the TCP model when we have a
		 * match/restart case? Actually no fix is needed: the lookup
		 * will always find the existing assoc, so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()s by the time the COOKIE was sent. But there is a
		 * price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accept()s. The app just loses and should NOT be in this
		 * state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, inp->fibnum,
#endif
			    vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
		    src, dst, sh, cp,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid,
#endif
		    vrf_id, port);
	}
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}
/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked)
{
	int unsent_data;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	SCTP_TCB_LOCK_ASSERT(stcb);

	/*
	 * This function returns non-zero if any stream has true unsent data
	 * on it. Note that as it looks through the streams it will clean up
	 * any places that have old data that has been sent but was left at
	 * the top of the stream queue.
	 */
	asoc = &stcb->asoc;
	unsent_data = 0;
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data is queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing deferred cleanup. Last time
				 * through, when we took all the data, the
				 * sender_all_done flag was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
					unsent_data++;
				}
			} else {
				unsent_data++;
			}
			if (unsent_data > 0) {
				break;
			}
		}
	}
	return (unsent_data);
}
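/*
 * Apply the parameters carried in a peer's INIT or INIT-ACK to the
 * association: record the peer's verification tag and advertised receive
 * window, initialize inbound TSN tracking, trim the outbound streams (and
 * any data queued on streams the peer cannot accept) down to the peer's
 * advertised number of inbound streams, and (re)allocate the inbound
 * stream array.
 */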
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	SCTP_TCB_LOCK_ASSERT(stcb);

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.sid >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp);
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */
	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].sid = i;
		asoc->strmin[i].last_mid_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIEs, existing and new, call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid,
#endif
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag, cookie_found;
	int initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	cookie_found = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp,
	    &nat_friendly, &cookie_found);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (!cookie_found) {
		uint16_t len;

		/* Only report the missing cookie parameter */
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
		/* We abort with an error of missing mandatory param */
		op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			struct sctp_error_missing_param *cause;

			SCTP_BUF_LEN(op_err) = len;
			cause = mtod(op_err, struct sctp_error_missing_param *);
			/* Subtract the reserved param */
			cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
			cause->cause.length = htons(len);
			cause->num_missing_params = htonl(1);
			cause->type[0] = htons(SCTP_STATE_COOKIE);
		}
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-3);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t)nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	if (sctp_process_init((struct sctp_init_chunk *)cp, stcb) < 0) {
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    offset + sizeof(struct sctp_init_chunk),
	    initack_limit, src, dst, NULL, stcb->asoc.port)) < 0) {
		if (op_err != NULL) {
			sctp_m_freem(op_err);
		}
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid,
#endif
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->asconf_supported == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}

	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer. We do this first, before queueing the
	 * cookie. We always cancel at the primary, assuming that we are
	 * canceling the timer started by the INIT, which always goes to
	 * the primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);

	/* calculate the RTO */
	sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
	    SCTP_RTT_FROM_NON_DATA);
#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);

			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) &&
			      (sb_free_now >= inp->send_sb_threshold) &&
			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
			     (inp->send_sb_threshold == 0))) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	}
#endif
	retval = sctp_send_cookie_echo(m, offset, initack_limit, stcb, net);
	return (retval);
}
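/*
 * Process a HEARTBEAT-ACK: locate the destination address echoed back in
 * the heartbeat info, confirm it if the random values match, clear its
 * error counters, update the RTO from the echoed timestamp, and restart
 * the heartbeat timer. Also handles the mobility case where the confirmed
 * address had been requested as the new primary.
 */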
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	union sctp_sockstore store;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;
	uint16_t old_error_counter;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}

	memset(&store, 0, sizeof(store));
	switch (cp->heartbeat.hb_info.addr_family) {
#ifdef INET
	case AF_INET:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
#ifdef HAVE_SIN_LEN
			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
#endif
			store.sin.sin_port = stcb->rport;
			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
			    sizeof(store.sin.sin_addr));
		} else {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
#ifdef HAVE_SIN6_LEN
			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
#endif
			store.sin6.sin6_port = stcb->rport;
			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
		} else {
			return;
		}
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
			store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
#ifdef HAVE_SCONN_LEN
			store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
#endif
			store.sconn.sconn_port = stcb->rport;
			memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
		} else {
			return;
		}
		break;
#endif
	default:
		return;
	}
	r_net = sctp_findnet(stcb, &store.sa);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If it's a HB and its random values are correct, we can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * The first one on the list is NOT the
				 * primary. sctp_cmpaddr() is much more
				 * efficient if the primary is the first on
				 * the list, so make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
		    r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	old_error_counter = r_net->error_count;
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	/* Now lets do a RTO with this */
	sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv,
	    SCTP_RTT_FROM_NON_DATA);
	if ((r_net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	if (r_net->dest_state & SCTP_ADDR_PF) {
		r_net->dest_state &= ~SCTP_ADDR_PF;
		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
	}
	if (old_error_counter > 0) {
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (r_net == stcb->asoc.primary_destination) {
		if (stcb->asoc.alternate) {
			/* release the alternate, primary is good */
			sctp_free_remote_addr(stcb->asoc.alternate);
			stcb->asoc.alternate = NULL;
		}
	}
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
			    stcb->sctp_ep, stcb, NULL,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb);
		}
	}
}
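/*
 * NAT colliding-state handling: the peer reported that our verification
 * tag collides with another association behind the same NAT. If we are
 * still in COOKIE-WAIT or COOKIE-ECHOED, pick a fresh verification tag,
 * rehash the TCB under the new tag, and resend the INIT instead of
 * letting the ABORT tear the association down.
 */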
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * Return 0 means we want you to proceed with the abort;
	 * non-zero means no abort processing.
	 */
	uint32_t new_vtag;
	struct sctpasochead *head;

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	} else {
		return (0);
	}
	new_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = new_vtag;
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		SCTP_INP_INFO_WUNLOCK();
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	} else {
		/*
		 * treat like a case where the cookie expired, i.e.:
		 * - dump the current cookie.
		 * - generate a new vtag.
		 * - resend the init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = new_vtag;
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		SCTP_INP_INFO_WUNLOCK();
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * return 0 means we want you to proceed with the abort;
	 * non-zero means no abort processing
	 */
	if (stcb->asoc.auth_supported == 0) {
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
		return (0);
	}
	sctp_asconf_send_nat_state_update(stcb, net);
	return (1);
}
  768. /* Returns 1 if the stcb was aborted, 0 otherwise */
  769. static int
  770. sctp_handle_abort(struct sctp_abort_chunk *abort,
  771. struct sctp_tcb *stcb, struct sctp_nets *net)
  772. {
  773. #if defined(__APPLE__) && !defined(__Userspace__)
  774. struct socket *so;
  775. #endif
  776. uint16_t len;
  777. uint16_t error;
  778. SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
  779. if (stcb == NULL)
  780. return (0);
  781. len = ntohs(abort->ch.chunk_length);
  782. if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_error_cause)) {
  783. /* Need to check the cause codes for our
  784. * two magic nat aborts which don't kill the assoc
  785. * necessarily.
  786. */
  787. struct sctp_error_cause *cause;
  788. cause = (struct sctp_error_cause *)(abort + 1);
  789. error = ntohs(cause->code);
  790. if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
  791. SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ABORT flags:%x\n",
  792. abort->ch.chunk_flags);
  793. if (sctp_handle_nat_colliding_state(stcb)) {
  794. return (0);
  795. }
  796. } else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
  797. SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ABORT flags:%x\n",
  798. abort->ch.chunk_flags);
  799. if (sctp_handle_nat_missing_state(stcb, net)) {
  800. return (0);
  801. }
  802. }
  803. } else {
  804. error = 0;
  805. }
  806. /* stop any receive timers */
  807. sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
  808. SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
  809. /* notify user of the abort and clean up... */
  810. sctp_abort_notification(stcb, true, false, error, abort, SCTP_SO_NOT_LOCKED);
  811. /* free the tcb */
  812. SCTP_STAT_INCR_COUNTER32(sctps_aborted);
  813. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
  814. (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
  815. SCTP_STAT_DECR_GAUGE32(sctps_currestab);
  816. }
  817. #ifdef SCTP_ASOCLOG_OF_TSNS
  818. sctp_print_out_track_log(stcb);
  819. #endif
  820. #if defined(__APPLE__) && !defined(__Userspace__)
  821. so = SCTP_INP_SO(stcb->sctp_ep);
  822. atomic_add_int(&stcb->asoc.refcnt, 1);
  823. SCTP_TCB_UNLOCK(stcb);
  824. SCTP_SOCKET_LOCK(so, 1);
  825. SCTP_TCB_LOCK(stcb);
  826. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  827. #endif
  828. (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
  829. SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
  830. #if defined(__APPLE__) && !defined(__Userspace__)
  831. SCTP_SOCKET_UNLOCK(so, 1);
  832. #endif
  833. SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
  834. return (1);
  835. }
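/*
 * Start the per-destination timers (path-MTU raise and heartbeat) once the
 * association comes up, and probe any still-unconfirmed addresses with a
 * heartbeat, bounded by the sctp_hb_maxburst sysctl.
 */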
  836. static void
  837. sctp_start_net_timers(struct sctp_tcb *stcb)
  838. {
  839. uint32_t cnt_hb_sent;
  840. struct sctp_nets *net;
  841. cnt_hb_sent = 0;
  842. TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
843. /* For each network start:
844. * 1) A pmtu timer.
845. * 2) A HB timer.
846. * 3) If the dest is unconfirmed, send
847. * a HB as well, provided fewer than max_hb_burst
848. * have been sent.
849. */
  850. sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
  851. sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
  852. if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
  853. (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
  854. sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
  855. cnt_hb_sent++;
  856. }
  857. }
  858. if (cnt_hb_sent) {
  859. sctp_chunk_output(stcb->sctp_ep, stcb,
  860. SCTP_OUTPUT_FROM_COOKIE_ACK,
  861. SCTP_SO_NOT_LOCKED);
  862. }
  863. }
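/*
 * Handle a SHUTDOWN chunk from the peer: process its cumulative TSN ack,
 * terminate any partial delivery in progress, notify the ULP and move to
 * SHUTDOWN-RECEIVED. Once our send queues have drained we reply with a
 * SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.
 */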
  864. static void
  865. sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
  866. struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
  867. {
  868. struct sctp_association *asoc;
  869. int some_on_streamwheel;
  870. int old_state;
  871. #if defined(__APPLE__) && !defined(__Userspace__)
  872. struct socket *so;
  873. #endif
  874. SCTPDBG(SCTP_DEBUG_INPUT2,
  875. "sctp_handle_shutdown: handling SHUTDOWN\n");
  876. if (stcb == NULL)
  877. return;
  878. asoc = &stcb->asoc;
  879. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
  880. (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
  881. return;
  882. }
  883. if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
  884. /* Shutdown NOT the expected size */
  885. return;
  886. }
  887. old_state = SCTP_GET_STATE(stcb);
  888. sctp_update_acked(stcb, cp, abort_flag);
  889. if (*abort_flag) {
  890. return;
  891. }
  892. if (asoc->control_pdapi) {
893. /* With a normal shutdown
894. * we assume the end of the last record.
895. */
  896. SCTP_INP_READ_LOCK(stcb->sctp_ep);
  897. if (asoc->control_pdapi->on_strm_q) {
  898. struct sctp_stream_in *strm;
  899. strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
  900. if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
  901. /* Unordered */
  902. TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
  903. asoc->control_pdapi->on_strm_q = 0;
  904. } else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
  905. /* Ordered */
  906. TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
  907. asoc->control_pdapi->on_strm_q = 0;
  908. #ifdef INVARIANTS
  909. } else {
  910. panic("Unknown state on ctrl:%p on_strm_q:%d",
  911. asoc->control_pdapi,
  912. asoc->control_pdapi->on_strm_q);
  913. #endif
  914. }
  915. }
  916. asoc->control_pdapi->end_added = 1;
  917. asoc->control_pdapi->pdapi_aborted = 1;
  918. asoc->control_pdapi = NULL;
  919. SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
  920. #if defined(__APPLE__) && !defined(__Userspace__)
  921. so = SCTP_INP_SO(stcb->sctp_ep);
  922. atomic_add_int(&stcb->asoc.refcnt, 1);
  923. SCTP_TCB_UNLOCK(stcb);
  924. SCTP_SOCKET_LOCK(so, 1);
  925. SCTP_TCB_LOCK(stcb);
  926. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  927. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  928. /* assoc was freed while we were unlocked */
  929. SCTP_SOCKET_UNLOCK(so, 1);
  930. return;
  931. }
  932. #endif
  933. if (stcb->sctp_socket) {
  934. sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
  935. }
  936. #if defined(__APPLE__) && !defined(__Userspace__)
  937. SCTP_SOCKET_UNLOCK(so, 1);
  938. #endif
  939. }
  940. /* goto SHUTDOWN_RECEIVED state to block new requests */
  941. if (stcb->sctp_socket) {
  942. if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
  943. (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
  944. (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
  945. SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_RECEIVED);
  946. /* notify upper layer that peer has initiated a shutdown */
  947. sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  948. /* reset time */
  949. (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
  950. }
  951. }
  952. if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
  953. /*
  954. * stop the shutdown timer, since we WILL move to
  955. * SHUTDOWN-ACK-SENT.
  956. */
  957. sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
  958. net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
  959. }
  960. /* Now is there unsent data on a stream somewhere? */
  961. some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
  962. if (!TAILQ_EMPTY(&asoc->send_queue) ||
  963. !TAILQ_EMPTY(&asoc->sent_queue) ||
  964. some_on_streamwheel) {
  965. /* By returning we will push more data out */
  966. return;
  967. } else {
  968. /* no outstanding data to send, so move on... */
  969. /* send SHUTDOWN-ACK */
  970. /* move to SHUTDOWN-ACK-SENT state */
  971. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
  972. (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
  973. SCTP_STAT_DECR_GAUGE32(sctps_currestab);
  974. }
  975. if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
  976. SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
  977. sctp_stop_timers_for_shutdown(stcb);
  978. sctp_send_shutdown_ack(stcb, net);
  979. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
  980. stcb->sctp_ep, stcb, net);
  981. } else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
  982. sctp_send_shutdown_ack(stcb, net);
  983. }
  984. }
  985. }
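/*
 * Handle a SHUTDOWN-ACK: only valid in SHUTDOWN-SENT or SHUTDOWN-ACK-SENT.
 * Stop the shutdown timer, send a SHUTDOWN-COMPLETE, notify the ULP and
 * free the association.
 */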
  986. static void
  987. sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
  988. struct sctp_tcb *stcb,
  989. struct sctp_nets *net)
  990. {
  991. struct sctp_association *asoc;
  992. #if defined(__APPLE__) && !defined(__Userspace__)
  993. struct socket *so;
  994. so = SCTP_INP_SO(stcb->sctp_ep);
  995. #endif
  996. SCTPDBG(SCTP_DEBUG_INPUT2,
  997. "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
  998. if (stcb == NULL)
  999. return;
  1000. asoc = &stcb->asoc;
  1001. /* process according to association state */
  1002. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
  1003. (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
  1004. /* unexpected SHUTDOWN-ACK... do OOTB handling... */
  1005. sctp_send_shutdown_complete(stcb, net, 1);
  1006. SCTP_TCB_UNLOCK(stcb);
  1007. return;
  1008. }
  1009. if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
  1010. (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
  1011. /* unexpected SHUTDOWN-ACK... so ignore... */
  1012. SCTP_TCB_UNLOCK(stcb);
  1013. return;
  1014. }
  1015. if (asoc->control_pdapi) {
1016. /* With a normal shutdown
1017. * we assume the end of the last record.
1018. */
  1019. SCTP_INP_READ_LOCK(stcb->sctp_ep);
  1020. asoc->control_pdapi->end_added = 1;
  1021. asoc->control_pdapi->pdapi_aborted = 1;
  1022. asoc->control_pdapi = NULL;
  1023. SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
  1024. #if defined(__APPLE__) && !defined(__Userspace__)
  1025. atomic_add_int(&stcb->asoc.refcnt, 1);
  1026. SCTP_TCB_UNLOCK(stcb);
  1027. SCTP_SOCKET_LOCK(so, 1);
  1028. SCTP_TCB_LOCK(stcb);
  1029. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1030. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  1031. /* assoc was freed while we were unlocked */
  1032. SCTP_SOCKET_UNLOCK(so, 1);
  1033. return;
  1034. }
  1035. #endif
  1036. sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
  1037. #if defined(__APPLE__) && !defined(__Userspace__)
  1038. SCTP_SOCKET_UNLOCK(so, 1);
  1039. #endif
  1040. }
  1041. #ifdef INVARIANTS
  1042. if (!TAILQ_EMPTY(&asoc->send_queue) ||
  1043. !TAILQ_EMPTY(&asoc->sent_queue) ||
  1044. sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
  1045. panic("Queues are not empty when handling SHUTDOWN-ACK");
  1046. }
  1047. #endif
  1048. /* stop the timer */
  1049. sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
  1050. SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
  1051. /* send SHUTDOWN-COMPLETE */
  1052. sctp_send_shutdown_complete(stcb, net, 0);
  1053. /* notify upper layer protocol */
  1054. if (stcb->sctp_socket) {
  1055. if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  1056. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
  1057. SCTP_SB_CLEAR(stcb->sctp_socket->so_snd);
  1058. }
  1059. sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  1060. }
  1061. SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
  1062. /* free the TCB but first save off the ep */
  1063. #if defined(__APPLE__) && !defined(__Userspace__)
  1064. atomic_add_int(&stcb->asoc.refcnt, 1);
  1065. SCTP_TCB_UNLOCK(stcb);
  1066. SCTP_SOCKET_LOCK(so, 1);
  1067. SCTP_TCB_LOCK(stcb);
  1068. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1069. #endif
  1070. (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
  1071. SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
  1072. #if defined(__APPLE__) && !defined(__Userspace__)
  1073. SCTP_SOCKET_UNLOCK(so, 1);
  1074. #endif
  1075. }
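/*
 * An "unrecognized chunk" error cause from the peer means it does not
 * implement that chunk type, so turn off the corresponding optional
 * feature (ASCONF or [I-]FORWARD-TSN) for this association.
 */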
  1076. static void
  1077. sctp_process_unrecog_chunk(struct sctp_tcb *stcb, uint8_t chunk_type)
  1078. {
  1079. switch (chunk_type) {
  1080. case SCTP_ASCONF_ACK:
  1081. case SCTP_ASCONF:
  1082. sctp_asconf_cleanup(stcb);
  1083. break;
  1084. case SCTP_IFORWARD_CUM_TSN:
  1085. case SCTP_FORWARD_CUM_TSN:
  1086. stcb->asoc.prsctp_supported = 0;
  1087. break;
  1088. default:
  1089. SCTPDBG(SCTP_DEBUG_INPUT2,
  1090. "Peer does not support chunk type %d (0x%x).\n",
  1091. chunk_type, chunk_type);
  1092. break;
  1093. }
  1094. }
1095. /*
1096. * Skip past the param header and then we will find the param that caused the
1097. * problem. There are a number of params in an ASCONF, or the PR-SCTP param;
1098. * these will turn off specific features.
1099. * XXX: Is this the right thing to do?
1100. */
  1101. static void
  1102. sctp_process_unrecog_param(struct sctp_tcb *stcb, uint16_t parameter_type)
  1103. {
  1104. switch (parameter_type) {
  1105. /* pr-sctp draft */
  1106. case SCTP_PRSCTP_SUPPORTED:
  1107. stcb->asoc.prsctp_supported = 0;
  1108. break;
  1109. case SCTP_SUPPORTED_CHUNK_EXT:
  1110. break;
  1111. /* draft-ietf-tsvwg-addip-sctp */
  1112. case SCTP_HAS_NAT_SUPPORT:
  1113. stcb->asoc.peer_supports_nat = 0;
  1114. break;
  1115. case SCTP_ADD_IP_ADDRESS:
  1116. case SCTP_DEL_IP_ADDRESS:
  1117. case SCTP_SET_PRIM_ADDR:
  1118. stcb->asoc.asconf_supported = 0;
  1119. break;
  1120. case SCTP_SUCCESS_REPORT:
  1121. case SCTP_ERROR_CAUSE_IND:
  1122. SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
  1123. SCTPDBG(SCTP_DEBUG_INPUT2,
  1124. "Turning off ASCONF to this strange peer\n");
  1125. stcb->asoc.asconf_supported = 0;
  1126. break;
  1127. default:
  1128. SCTPDBG(SCTP_DEBUG_INPUT2,
  1129. "Peer does not support param type %d (0x%x)??\n",
  1130. parameter_type, parameter_type);
  1131. break;
  1132. }
  1133. }
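/*
 * Walk the error causes carried in an OPERATION-ERROR chunk, bounded by
 * both the chunk length and the given limit, act on the causes we care
 * about, and report the first cause code to the ULP.
 */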
  1134. static int
  1135. sctp_handle_error(struct sctp_chunkhdr *ch,
  1136. struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
  1137. {
  1138. struct sctp_error_cause *cause;
  1139. struct sctp_association *asoc;
  1140. uint32_t remaining_length, adjust;
  1141. uint16_t code, cause_code, cause_length;
  1142. #if defined(__APPLE__) && !defined(__Userspace__)
  1143. struct socket *so;
  1144. #endif
  1145. /* parse through all of the errors and process */
  1146. asoc = &stcb->asoc;
  1147. cause = (struct sctp_error_cause *)((caddr_t)ch +
  1148. sizeof(struct sctp_chunkhdr));
  1149. remaining_length = ntohs(ch->chunk_length);
  1150. if (remaining_length > limit) {
  1151. remaining_length = limit;
  1152. }
  1153. if (remaining_length >= sizeof(struct sctp_chunkhdr)) {
  1154. remaining_length -= sizeof(struct sctp_chunkhdr);
  1155. } else {
  1156. remaining_length = 0;
  1157. }
  1158. code = 0;
  1159. while (remaining_length >= sizeof(struct sctp_error_cause)) {
  1160. /* Process an Error Cause */
  1161. cause_code = ntohs(cause->code);
  1162. cause_length = ntohs(cause->length);
  1163. if ((cause_length > remaining_length) || (cause_length == 0)) {
  1164. /* Invalid cause length, possibly due to truncation. */
  1165. SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in cause - bytes left: %u cause length: %u\n",
  1166. remaining_length, cause_length);
  1167. return (0);
  1168. }
  1169. if (code == 0) {
  1170. /* report the first error cause */
  1171. code = cause_code;
  1172. }
  1173. switch (cause_code) {
  1174. case SCTP_CAUSE_INVALID_STREAM:
  1175. case SCTP_CAUSE_MISSING_PARAM:
  1176. case SCTP_CAUSE_INVALID_PARAM:
  1177. case SCTP_CAUSE_NO_USER_DATA:
  1178. SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %u back? We have a bug :/ (or do they?)\n",
  1179. cause_code);
  1180. break;
  1181. case SCTP_CAUSE_NAT_COLLIDING_STATE:
  1182. SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state, ERROR flags: %x\n",
  1183. ch->chunk_flags);
  1184. if (sctp_handle_nat_colliding_state(stcb)) {
  1185. return (0);
  1186. }
  1187. break;
  1188. case SCTP_CAUSE_NAT_MISSING_STATE:
  1189. SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state, ERROR flags: %x\n",
  1190. ch->chunk_flags);
  1191. if (sctp_handle_nat_missing_state(stcb, net)) {
  1192. return (0);
  1193. }
  1194. break;
  1195. case SCTP_CAUSE_STALE_COOKIE:
  1196. /*
  1197. * We only act if we have echoed a cookie and are
  1198. * waiting.
  1199. */
  1200. if ((cause_length >= sizeof(struct sctp_error_stale_cookie)) &&
  1201. (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
  1202. struct sctp_error_stale_cookie *stale_cookie;
  1203. stale_cookie = (struct sctp_error_stale_cookie *)cause;
1204. /* stale_time is in usec, convert to msec. */
  1205. asoc->cookie_preserve_req = ntohl(stale_cookie->stale_time) / 1000;
  1206. /* Double it to be more robust on RTX. */
  1207. asoc->cookie_preserve_req *= 2;
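/*
 * The doubled stale time is kept in cookie_preserve_req so that the INIT
 * we resend below can ask the peer for a longer cookie lifetime
 * (Cookie Preservative).
 */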
  1208. asoc->stale_cookie_count++;
  1209. if (asoc->stale_cookie_count >
  1210. asoc->max_init_times) {
  1211. sctp_abort_notification(stcb, false, true, 0, NULL, SCTP_SO_NOT_LOCKED);
  1212. /* now free the asoc */
  1213. #if defined(__APPLE__) && !defined(__Userspace__)
  1214. so = SCTP_INP_SO(stcb->sctp_ep);
  1215. atomic_add_int(&stcb->asoc.refcnt, 1);
  1216. SCTP_TCB_UNLOCK(stcb);
  1217. SCTP_SOCKET_LOCK(so, 1);
  1218. SCTP_TCB_LOCK(stcb);
  1219. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1220. #endif
  1221. (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
  1222. SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
  1223. #if defined(__APPLE__) && !defined(__Userspace__)
  1224. SCTP_SOCKET_UNLOCK(so, 1);
  1225. #endif
  1226. return (-1);
  1227. }
  1228. /* blast back to INIT state */
  1229. sctp_toss_old_cookies(stcb, &stcb->asoc);
  1230. SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
  1231. sctp_stop_all_cookie_timers(stcb);
  1232. sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
  1233. }
  1234. break;
  1235. case SCTP_CAUSE_UNRESOLVABLE_ADDR:
1236. /*
1237. * Nothing we can do here; we don't do hostname
1238. * addresses, so if the peer does not like my IPv6
1239. * (or IPv4 for that matter) it does not matter. If
1240. * they don't support that type of address, they can
1241. * NOT possibly get that packet type... i.e. with no
1242. * IPv6 you can't receive an IPv6 packet, so we can
1243. * safely ignore this one. If we ever added support
1244. * for HOSTNAME addresses, then we would need to do
1245. * something here.
1246. */
  1247. break;
  1248. case SCTP_CAUSE_UNRECOG_CHUNK:
  1249. if (cause_length >= sizeof(struct sctp_error_unrecognized_chunk)) {
  1250. struct sctp_error_unrecognized_chunk *unrec_chunk;
  1251. unrec_chunk = (struct sctp_error_unrecognized_chunk *)cause;
  1252. sctp_process_unrecog_chunk(stcb, unrec_chunk->ch.chunk_type);
  1253. }
  1254. break;
  1255. case SCTP_CAUSE_UNRECOG_PARAM:
  1256. /* XXX: We only consider the first parameter */
  1257. if (cause_length >= sizeof(struct sctp_error_cause) + sizeof(struct sctp_paramhdr)) {
  1258. struct sctp_paramhdr *unrec_parameter;
  1259. unrec_parameter = (struct sctp_paramhdr *)(cause + 1);
  1260. sctp_process_unrecog_param(stcb, ntohs(unrec_parameter->param_type));
  1261. }
  1262. break;
  1263. case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
1264. /*
1265. * We ignore this since the timer will drive out a
1266. * new cookie anyway and their timer will drive us
1267. * to send a SHUTDOWN_COMPLETE. We can't send one
1268. * here since we don't have their tag.
1269. */
  1270. break;
  1271. case SCTP_CAUSE_DELETING_LAST_ADDR:
  1272. case SCTP_CAUSE_RESOURCE_SHORTAGE:
  1273. case SCTP_CAUSE_DELETING_SRC_ADDR:
1274. /*
1275. * We should NOT get these here, but in an
1276. * ASCONF-ACK.
1277. */
  1278. SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a error cause with code %u.\n",
  1279. cause_code);
  1280. break;
  1281. case SCTP_CAUSE_OUT_OF_RESC:
1282. /*
1283. * And what, pray tell, do we do with the fact that
1284. * the peer is out of resources? Not really sure we
1285. * could do anything but abort. I suspect this
1286. * should have come WITH an abort instead of in an
1287. * OP-ERROR.
1288. */
  1289. break;
  1290. default:
  1291. SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown code 0x%x\n",
  1292. cause_code);
  1293. break;
  1294. }
  1295. adjust = SCTP_SIZE32(cause_length);
  1296. if (remaining_length >= adjust) {
  1297. remaining_length -= adjust;
  1298. } else {
  1299. remaining_length = 0;
  1300. }
  1301. cause = (struct sctp_error_cause *)((caddr_t)cause + adjust);
  1302. }
  1303. sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, code, ch, SCTP_SO_NOT_LOCKED);
  1304. return (0);
  1305. }
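/*
 * Handle an INIT-ACK. It is only acted upon in COOKIE-WAIT: validate the
 * mandatory parameters, confirm the primary destination, process the
 * parameters and move to COOKIE-ECHOED. The COOKIE-ECHO itself is sent by
 * the output routine once inbound processing finishes.
 */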
  1306. static int
  1307. sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
  1308. struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
  1309. struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
  1310. struct sctp_nets *net, int *abort_no_unlock,
  1311. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1312. uint8_t mflowtype, uint32_t mflowid,
  1313. #endif
  1314. uint32_t vrf_id)
  1315. {
  1316. struct sctp_init_ack *init_ack;
  1317. struct mbuf *op_err;
  1318. SCTPDBG(SCTP_DEBUG_INPUT2,
  1319. "sctp_handle_init_ack: handling INIT-ACK\n");
  1320. if (stcb == NULL) {
  1321. SCTPDBG(SCTP_DEBUG_INPUT2,
  1322. "sctp_handle_init_ack: TCB is null\n");
  1323. return (-1);
  1324. }
1325. /* Only process the INIT-ACK chunk in COOKIE-WAIT state. */
  1326. if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
  1327. init_ack = &cp->init;
  1328. /* Validate parameters. */
  1329. if ((ntohl(init_ack->initiate_tag) == 0) ||
  1330. (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) ||
  1331. (ntohs(init_ack->num_inbound_streams) == 0) ||
  1332. (ntohs(init_ack->num_outbound_streams) == 0)) {
  1333. /* One of the mandatory parameters is illegal. */
  1334. op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
  1335. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  1336. src, dst, sh, op_err,
  1337. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1338. mflowtype, mflowid,
  1339. #endif
  1340. vrf_id, net->port);
  1341. *abort_no_unlock = 1;
  1342. return (-1);
  1343. }
  1344. if (stcb->asoc.primary_destination->dest_state &
  1345. SCTP_ADDR_UNCONFIRMED) {
  1346. /*
  1347. * The primary is where we sent the INIT, we can
  1348. * always consider it confirmed when the INIT-ACK is
  1349. * returned. Do this before we load addresses
  1350. * though.
  1351. */
  1352. stcb->asoc.primary_destination->dest_state &=
  1353. ~SCTP_ADDR_UNCONFIRMED;
  1354. sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
  1355. stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
  1356. }
  1357. if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
  1358. net, abort_no_unlock,
  1359. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1360. mflowtype, mflowid,
  1361. #endif
  1362. vrf_id) < 0) {
  1363. /* error in parsing parameters */
  1364. return (-1);
  1365. }
  1366. /* Update our state. */
  1367. SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
  1368. SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_ECHOED);
  1369. /* Reset the RTO calculation. */
  1370. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
  1371. sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
  1372. stcb->asoc.overall_error_count,
  1373. 0,
  1374. SCTP_FROM_SCTP_INPUT,
  1375. __LINE__);
  1376. }
  1377. stcb->asoc.overall_error_count = 0;
  1378. (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1379. /*
1380. * Collapse the init timer back in case of an exponential
1381. * backoff.
1382. */
  1383. sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
  1384. stcb, net);
  1385. /*
  1386. * The output routine at the end of the inbound data processing
  1387. * will cause the cookie to be sent.
  1388. */
  1389. SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
  1390. return (0);
  1391. } else {
  1392. return (-1);
  1393. }
  1394. }
  1395. static struct sctp_tcb *
  1396. sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
  1397. struct sockaddr *src, struct sockaddr *dst,
  1398. struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
  1399. struct sctp_inpcb *inp, struct sctp_nets **netp,
  1400. struct sockaddr *init_src, int *notification,
  1401. int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
  1402. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1403. uint8_t mflowtype, uint32_t mflowid,
  1404. #endif
  1405. uint32_t vrf_id, uint16_t port);
1406. /*
1407. * Handle a state cookie for an existing association.
1408. * m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
1409. *    chunk (note: this is a "split" mbuf and the cookie signature does not exist).
1410. * offset: offset into the mbuf to the cookie-echo chunk.
1411. */
  1412. static struct sctp_tcb *
  1413. sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
  1414. struct sockaddr *src, struct sockaddr *dst,
  1415. struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
  1416. struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
  1417. struct sockaddr *init_src, int *notification,
  1418. int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
  1419. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1420. uint8_t mflowtype, uint32_t mflowid,
  1421. #endif
  1422. uint32_t vrf_id, uint16_t port)
  1423. {
  1424. struct sctp_association *asoc;
  1425. struct sctp_init_chunk *init_cp, init_buf;
  1426. struct sctp_init_ack_chunk *initack_cp, initack_buf;
  1427. struct sctp_asconf_addr *aparam, *naparam;
  1428. struct sctp_asconf_ack *aack, *naack;
  1429. struct sctp_tmit_chunk *chk, *nchk;
  1430. struct sctp_stream_reset_list *strrst, *nstrrst;
  1431. struct sctp_queued_to_read *sq, *nsq;
  1432. struct sctp_nets *net;
  1433. struct mbuf *op_err;
  1434. struct timeval old;
  1435. int init_offset, initack_offset, i;
  1436. int retval;
  1437. int spec_flag = 0;
  1438. uint32_t how_indx;
  1439. #if defined(SCTP_DETAILED_STR_STATS)
  1440. int j;
  1441. #endif
  1442. net = *netp;
  1443. /* I know that the TCB is non-NULL from the caller */
  1444. asoc = &stcb->asoc;
  1445. for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
  1446. if (asoc->cookie_how[how_indx] == 0)
  1447. break;
  1448. }
  1449. if (how_indx < sizeof(asoc->cookie_how)) {
  1450. asoc->cookie_how[how_indx] = 1;
  1451. }
  1452. if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
  1453. /* SHUTDOWN came in after sending INIT-ACK */
  1454. sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
  1455. op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
  1456. sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
  1457. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1458. mflowtype, mflowid, inp->fibnum,
  1459. #endif
  1460. vrf_id, net->port);
  1461. if (how_indx < sizeof(asoc->cookie_how))
  1462. asoc->cookie_how[how_indx] = 2;
  1463. SCTP_TCB_UNLOCK(stcb);
  1464. return (NULL);
  1465. }
  1466. /*
  1467. * find and validate the INIT chunk in the cookie (peer's info) the
  1468. * INIT should start after the cookie-echo header struct (chunk
  1469. * header, state cookie header struct)
  1470. */
  1471. init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
  1472. init_cp = (struct sctp_init_chunk *)
  1473. sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
  1474. (uint8_t *) & init_buf);
  1475. if (init_cp == NULL) {
  1476. /* could not pull a INIT chunk in cookie */
  1477. SCTP_TCB_UNLOCK(stcb);
  1478. return (NULL);
  1479. }
  1480. if (init_cp->ch.chunk_type != SCTP_INITIATION) {
  1481. SCTP_TCB_UNLOCK(stcb);
  1482. return (NULL);
  1483. }
  1484. /*
  1485. * find and validate the INIT-ACK chunk in the cookie (my info) the
  1486. * INIT-ACK follows the INIT chunk
  1487. */
  1488. initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
  1489. initack_cp = (struct sctp_init_ack_chunk *)
  1490. sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
  1491. (uint8_t *) & initack_buf);
  1492. if (initack_cp == NULL) {
  1493. /* could not pull INIT-ACK chunk in cookie */
  1494. SCTP_TCB_UNLOCK(stcb);
  1495. return (NULL);
  1496. }
  1497. if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
  1498. SCTP_TCB_UNLOCK(stcb);
  1499. return (NULL);
  1500. }
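/*
 * What follows is the COOKIE-ECHO collision handling of Section 5.2.4
 * (Table 2). In the case labels below, the four letters describe, in
 * order, the local tag, the peer's tag, the local tie-tag and the peer's
 * tie-tag: M = matches our TCB, X = does not match, O/0 = zero,
 * A = any value.
 */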
  1501. if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
  1502. (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
  1503. /*
  1504. * case D in Section 5.2.4 Table 2: MMAA process accordingly
  1505. * to get into the OPEN state
  1506. */
  1507. if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1508. /*-
1509. * Oops, this means that we somehow generated two identical
1510. * vtags. I.e. we did:
1511. * Us Peer
1512. * <---INIT(tag=a)------
1513. * ----INIT-ACK(tag=t)-->
1514. * ----INIT(tag=t)------> *1
1515. * <---INIT-ACK(tag=a)---
1516. * <----CE(tag=t)------------- *2
1517. *
1518. * At point *1 we should be generating a different
1519. * tag t', which means we would throw away the CE and send
1520. * ours instead. Basically this is case C (throw-away side).
1521. */
  1522. if (how_indx < sizeof(asoc->cookie_how))
  1523. asoc->cookie_how[how_indx] = 17;
  1524. SCTP_TCB_UNLOCK(stcb);
  1525. return (NULL);
  1526. }
  1527. switch (SCTP_GET_STATE(stcb)) {
  1528. case SCTP_STATE_COOKIE_WAIT:
  1529. case SCTP_STATE_COOKIE_ECHOED:
  1530. /*
  1531. * INIT was sent but got a COOKIE_ECHO with the
  1532. * correct tags... just accept it...but we must
  1533. * process the init so that we can make sure we
  1534. * have the right seq no's.
  1535. */
  1536. /* First we must process the INIT !! */
  1537. if (sctp_process_init(init_cp, stcb) < 0) {
  1538. if (how_indx < sizeof(asoc->cookie_how))
  1539. asoc->cookie_how[how_indx] = 3;
  1540. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  1541. SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
  1542. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  1543. src, dst, sh, op_err,
  1544. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1545. mflowtype, mflowid,
  1546. #endif
  1547. vrf_id, net->port);
  1548. return (NULL);
  1549. }
  1550. /* we have already processed the INIT so no problem */
  1551. sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
  1552. stcb, net,
  1553. SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
  1554. sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
  1555. stcb, net,
  1556. SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
  1557. /* update current state */
  1558. if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
  1559. SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
  1560. else
  1561. SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
  1562. SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
  1563. if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
  1564. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
  1565. stcb->sctp_ep, stcb, NULL);
  1566. }
  1567. SCTP_STAT_INCR_GAUGE32(sctps_currestab);
  1568. sctp_stop_all_cookie_timers(stcb);
  1569. if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  1570. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  1571. (!SCTP_IS_LISTENING(inp))) {
  1572. #if defined(__APPLE__) && !defined(__Userspace__)
  1573. struct socket *so;
  1574. #endif
  1575. /*
  1576. * Here is where collision would go if we
  1577. * did a connect() and instead got a
  1578. * init/init-ack/cookie done before the
  1579. * init-ack came back..
  1580. */
  1581. sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
  1582. #if defined(__APPLE__) && !defined(__Userspace__)
  1583. so = SCTP_INP_SO(stcb->sctp_ep);
  1584. atomic_add_int(&stcb->asoc.refcnt, 1);
  1585. SCTP_TCB_UNLOCK(stcb);
  1586. SCTP_SOCKET_LOCK(so, 1);
  1587. SCTP_TCB_LOCK(stcb);
  1588. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1589. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  1590. SCTP_TCB_UNLOCK(stcb);
  1591. SCTP_SOCKET_UNLOCK(so, 1);
  1592. return (NULL);
  1593. }
  1594. #endif
  1595. soisconnected(stcb->sctp_socket);
  1596. #if defined(__APPLE__) && !defined(__Userspace__)
  1597. SCTP_SOCKET_UNLOCK(so, 1);
  1598. #endif
  1599. }
  1600. /* notify upper layer */
  1601. *notification = SCTP_NOTIFY_ASSOC_UP;
  1602. /*
  1603. * since we did not send a HB make sure we
  1604. * don't double things
  1605. */
  1606. old.tv_sec = cookie->time_entered.tv_sec;
  1607. old.tv_usec = cookie->time_entered.tv_usec;
  1608. net->hb_responded = 1;
  1609. sctp_calculate_rto(stcb, asoc, net, &old,
  1610. SCTP_RTT_FROM_NON_DATA);
  1611. if (stcb->asoc.sctp_autoclose_ticks &&
  1612. (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
  1613. sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
  1614. inp, stcb, NULL);
  1615. }
  1616. break;
  1617. default:
  1618. /*
  1619. * we're in the OPEN state (or beyond), so
  1620. * peer must have simply lost the COOKIE-ACK
  1621. */
  1622. break;
  1623. } /* end switch */
  1624. sctp_stop_all_cookie_timers(stcb);
  1625. if ((retval = sctp_load_addresses_from_init(stcb, m,
  1626. init_offset + sizeof(struct sctp_init_chunk),
  1627. initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) {
  1628. if (how_indx < sizeof(asoc->cookie_how))
  1629. asoc->cookie_how[how_indx] = 4;
  1630. op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
  1631. "Problem with address parameters");
  1632. SCTPDBG(SCTP_DEBUG_INPUT1,
  1633. "Load addresses from INIT causes an abort %d\n",
  1634. retval);
  1635. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  1636. src, dst, sh, op_err,
  1637. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1638. mflowtype, mflowid,
  1639. #endif
  1640. vrf_id, net->port);
  1641. return (NULL);
  1642. }
  1643. /* respond with a COOKIE-ACK */
  1644. sctp_toss_old_cookies(stcb, asoc);
  1645. sctp_send_cookie_ack(stcb);
  1646. if (how_indx < sizeof(asoc->cookie_how))
  1647. asoc->cookie_how[how_indx] = 5;
  1648. return (stcb);
  1649. }
  1650. if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
  1651. ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
  1652. cookie->tie_tag_my_vtag == 0 &&
  1653. cookie->tie_tag_peer_vtag == 0) {
  1654. /*
  1655. * case C in Section 5.2.4 Table 2: XMOO silently discard
  1656. */
  1657. if (how_indx < sizeof(asoc->cookie_how))
  1658. asoc->cookie_how[how_indx] = 6;
  1659. SCTP_TCB_UNLOCK(stcb);
  1660. return (NULL);
  1661. }
1662. /* If the peer supports NAT and the stcb is established,
1663. * send back an ABORT(colliding state).
1664. */
  1665. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) &&
  1666. (asoc->peer_supports_nat) &&
  1667. ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
  1668. ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
  1669. (asoc->peer_vtag == 0)))) {
1670. /* Special case - the peers support NAT. We may have
1671. * handed out the same tag on two INITs, since
1672. * one was not established: i.e. we get an INIT from host-1
1673. * behind the NAT and we respond with tag-a, then we get an INIT from
1674. * host-2 behind the NAT and hand out tag-a again. Then we
1675. * bring up host-1's (or 2's) assoc, and then comes the cookie
1676. * from host-2 (or 1). Now we have colliding state. We must
1677. * send an abort here with a colliding state indication.
1678. */
  1679. op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
  1680. sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
  1681. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1682. mflowtype, mflowid, inp->fibnum,
  1683. #endif
  1684. vrf_id, port);
  1685. SCTP_TCB_UNLOCK(stcb);
  1686. return (NULL);
  1687. }
  1688. if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
  1689. ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
  1690. (asoc->peer_vtag == 0))) {
  1691. /*
  1692. * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
  1693. * should be ok, re-accept peer info
  1694. */
  1695. if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1696. /* Extension of case C.
1697. * If we hit this, then the random number
1698. * generator returned the same vtag when we
1699. * first sent our INIT-ACK and when we later sent
1700. * our INIT. The side with the seq numbers that are
1701. * different will be the one that normally would
1702. * have hit case C. This in effect "extends" our vtags
1703. * in this collision case to be 64 bits. The same collision
1704. * could occur if you get both the vtag and the seq number the
1705. * same twice in a row, but that is much less likely. If it
1706. * did happen then we would proceed through and bring
1707. * up the assoc; we may end up with the wrong stream
1708. * setup, however, which would be bad, but there is
1709. * no way to tell until we send on a stream that does
1710. * not exist :-)
1711. */
  1712. if (how_indx < sizeof(asoc->cookie_how))
  1713. asoc->cookie_how[how_indx] = 7;
  1714. SCTP_TCB_UNLOCK(stcb);
  1715. return (NULL);
  1716. }
  1717. if (how_indx < sizeof(asoc->cookie_how))
  1718. asoc->cookie_how[how_indx] = 8;
  1719. sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
  1720. SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
  1721. sctp_stop_all_cookie_timers(stcb);
  1722. /*
  1723. * since we did not send a HB make sure we don't double
  1724. * things
  1725. */
  1726. net->hb_responded = 1;
  1727. if (stcb->asoc.sctp_autoclose_ticks &&
  1728. sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
  1729. sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
  1730. NULL);
  1731. }
  1732. asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
  1733. if (asoc->pre_open_streams < asoc->streamoutcnt) {
  1734. asoc->pre_open_streams = asoc->streamoutcnt;
  1735. }
  1736. if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1737. /* Ok, the peer probably discarded our
1738. * data (if we echoed a cookie+data). So anything
1739. * on the sent_queue should be marked for
1740. * retransmit; we may not get something to
1741. * kick us, so it COULD still take a timeout
1742. * to move these, but it can't hurt to mark them.
1743. */
  1744. TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
  1745. if (chk->sent < SCTP_DATAGRAM_RESEND) {
  1746. chk->sent = SCTP_DATAGRAM_RESEND;
  1747. sctp_flight_size_decrease(chk);
  1748. sctp_total_flight_decrease(stcb, chk);
  1749. sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
  1750. spec_flag++;
  1751. }
  1752. }
  1753. }
  1754. /* process the INIT info (peer's info) */
  1755. if (sctp_process_init(init_cp, stcb) < 0) {
  1756. if (how_indx < sizeof(asoc->cookie_how))
  1757. asoc->cookie_how[how_indx] = 9;
  1758. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  1759. SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
  1760. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  1761. src, dst, sh, op_err,
  1762. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1763. mflowtype, mflowid,
  1764. #endif
  1765. vrf_id, net->port);
  1766. return (NULL);
  1767. }
  1768. if ((retval = sctp_load_addresses_from_init(stcb, m,
  1769. init_offset + sizeof(struct sctp_init_chunk),
  1770. initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) {
  1771. if (how_indx < sizeof(asoc->cookie_how))
  1772. asoc->cookie_how[how_indx] = 10;
  1773. op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
  1774. "Problem with address parameters");
  1775. SCTPDBG(SCTP_DEBUG_INPUT1,
  1776. "Load addresses from INIT causes an abort %d\n",
  1777. retval);
  1778. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  1779. src, dst, sh, op_err,
  1780. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1781. mflowtype, mflowid,
  1782. #endif
  1783. vrf_id, net->port);
  1784. return (NULL);
  1785. }
  1786. if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
  1787. (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
  1788. *notification = SCTP_NOTIFY_ASSOC_UP;
  1789. if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  1790. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  1791. (!SCTP_IS_LISTENING(inp))) {
  1792. #if defined(__APPLE__) && !defined(__Userspace__)
  1793. struct socket *so;
  1794. #endif
  1795. sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
  1796. #if defined(__APPLE__) && !defined(__Userspace__)
  1797. so = SCTP_INP_SO(stcb->sctp_ep);
  1798. atomic_add_int(&stcb->asoc.refcnt, 1);
  1799. SCTP_TCB_UNLOCK(stcb);
  1800. SCTP_SOCKET_LOCK(so, 1);
  1801. SCTP_TCB_LOCK(stcb);
  1802. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1803. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  1804. SCTP_TCB_UNLOCK(stcb);
  1805. SCTP_SOCKET_UNLOCK(so, 1);
  1806. return (NULL);
  1807. }
  1808. #endif
  1809. soisconnected(stcb->sctp_socket);
  1810. #if defined(__APPLE__) && !defined(__Userspace__)
  1811. SCTP_SOCKET_UNLOCK(so, 1);
  1812. #endif
  1813. }
  1814. if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)
  1815. SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
  1816. else
  1817. SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
  1818. SCTP_STAT_INCR_GAUGE32(sctps_currestab);
  1819. } else if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
  1820. SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
  1821. } else {
  1822. SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
  1823. }
  1824. SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
  1825. if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
  1826. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
  1827. stcb->sctp_ep, stcb, NULL);
  1828. }
  1829. sctp_stop_all_cookie_timers(stcb);
  1830. sctp_toss_old_cookies(stcb, asoc);
  1831. sctp_send_cookie_ack(stcb);
  1832. if (spec_flag) {
1833. /* Only if we have retransmissions queued do we do this. What
1834. * this call does is get only the COOKIE-ACK out;
1835. * when we return, the normal call to
1836. * sctp_chunk_output will get the retransmissions out
1837. * behind this.
1838. */
  1839. sctp_chunk_output(inp,stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
  1840. }
  1841. if (how_indx < sizeof(asoc->cookie_how))
  1842. asoc->cookie_how[how_indx] = 11;
  1843. return (stcb);
  1844. }
  1845. if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
  1846. ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
  1847. cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
  1848. cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
  1849. cookie->tie_tag_peer_vtag != 0) {
  1850. struct sctpasochead *head;
  1851. #if defined(__APPLE__) && !defined(__Userspace__)
  1852. struct socket *so;
  1853. #endif
  1854. if (asoc->peer_supports_nat) {
  1855. struct sctp_tcb *local_stcb;
  1856. /* This is a gross gross hack.
  1857. * Just call the cookie_new code since we
  1858. * are allowing a duplicate association.
  1859. * I hope this works...
  1860. */
  1861. local_stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst,
  1862. sh, cookie, cookie_len,
  1863. inp, netp, init_src,notification,
  1864. auth_skipped, auth_offset, auth_len,
  1865. #if defined(__FreeBSD__) && !defined(__Userspace__)
  1866. mflowtype, mflowid,
  1867. #endif
  1868. vrf_id, port);
  1869. if (local_stcb == NULL) {
  1870. SCTP_TCB_UNLOCK(stcb);
  1871. }
  1872. return (local_stcb);
  1873. }
  1874. /*
  1875. * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
  1876. */
  1877. /* temp code */
  1878. if (how_indx < sizeof(asoc->cookie_how))
  1879. asoc->cookie_how[how_indx] = 12;
  1880. sctp_stop_association_timers(stcb, false);
  1881. /* notify upper layer */
  1882. *notification = SCTP_NOTIFY_ASSOC_RESTART;
  1883. atomic_add_int(&stcb->asoc.refcnt, 1);
  1884. if ((SCTP_GET_STATE(stcb) != SCTP_STATE_OPEN) &&
  1885. (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
  1886. (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT)) {
  1887. SCTP_STAT_INCR_GAUGE32(sctps_currestab);
  1888. }
  1889. if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
  1890. SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
  1891. } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
  1892. SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
  1893. }
  1894. if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
  1895. SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
  1896. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
  1897. stcb->sctp_ep, stcb, NULL);
  1898. } else if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) {
  1899. /* move to OPEN state, if not in SHUTDOWN_SENT */
  1900. SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
  1901. }
  1902. if (asoc->pre_open_streams < asoc->streamoutcnt) {
  1903. asoc->pre_open_streams = asoc->streamoutcnt;
  1904. }
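/*
 * The peer restarted, so restart our side of the association as well:
 * reseed all outgoing sequence/ack state from the INIT-ACK carried in the
 * cookie and clear the TSN mapping arrays.
 */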
  1905. asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
  1906. asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
  1907. asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
  1908. asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
  1909. asoc->str_reset_seq_in = asoc->init_seq_number;
  1910. asoc->advanced_peer_ack_point = asoc->last_acked_seq;
  1911. asoc->send_sack = 1;
  1912. asoc->data_pkts_seen = 0;
  1913. asoc->last_data_chunk_from = NULL;
  1914. asoc->last_control_chunk_from = NULL;
  1915. asoc->last_net_cmt_send_started = NULL;
  1916. if (asoc->mapping_array) {
  1917. memset(asoc->mapping_array, 0,
  1918. asoc->mapping_array_size);
  1919. }
  1920. if (asoc->nr_mapping_array) {
  1921. memset(asoc->nr_mapping_array, 0,
  1922. asoc->mapping_array_size);
  1923. }
  1924. SCTP_TCB_UNLOCK(stcb);
  1925. #if defined(__APPLE__) && !defined(__Userspace__)
  1926. so = SCTP_INP_SO(stcb->sctp_ep);
  1927. SCTP_SOCKET_LOCK(so, 1);
  1928. #endif
  1929. SCTP_INP_INFO_WLOCK();
  1930. SCTP_INP_WLOCK(stcb->sctp_ep);
  1931. SCTP_TCB_LOCK(stcb);
  1932. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1933. /* send up all the data */
  1934. sctp_report_all_outbound(stcb, 0, SCTP_SO_LOCKED);
  1935. for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
  1936. stcb->asoc.strmout[i].chunks_on_queues = 0;
  1937. #if defined(SCTP_DETAILED_STR_STATS)
  1938. for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
  1939. asoc->strmout[i].abandoned_sent[j] = 0;
  1940. asoc->strmout[i].abandoned_unsent[j] = 0;
  1941. }
  1942. #else
  1943. asoc->strmout[i].abandoned_sent[0] = 0;
  1944. asoc->strmout[i].abandoned_unsent[0] = 0;
  1945. #endif
  1946. stcb->asoc.strmout[i].next_mid_ordered = 0;
  1947. stcb->asoc.strmout[i].next_mid_unordered = 0;
  1948. stcb->asoc.strmout[i].sid = i;
  1949. stcb->asoc.strmout[i].last_msg_incomplete = 0;
  1950. }
  1951. TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) {
  1952. TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp);
  1953. SCTP_FREE(strrst, SCTP_M_STRESET);
  1954. }
  1955. TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) {
  1956. TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
  1957. if (sq->data) {
  1958. sctp_m_freem(sq->data);
  1959. sq->data = NULL;
  1960. }
  1961. sctp_free_remote_addr(sq->whoFrom);
  1962. sq->whoFrom = NULL;
  1963. sq->stcb = NULL;
  1964. sctp_free_a_readq(stcb, sq);
  1965. }
  1966. TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
  1967. TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
  1968. if (chk->data) {
  1969. sctp_m_freem(chk->data);
  1970. chk->data = NULL;
  1971. }
  1972. if (chk->holds_key_ref)
  1973. sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
  1974. sctp_free_remote_addr(chk->whoTo);
  1975. SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
  1976. SCTP_DECR_CHK_COUNT();
  1977. }
  1978. asoc->ctrl_queue_cnt = 0;
  1979. asoc->str_reset = NULL;
  1980. asoc->stream_reset_outstanding = 0;
  1981. TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
  1982. TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
  1983. if (chk->data) {
  1984. sctp_m_freem(chk->data);
  1985. chk->data = NULL;
  1986. }
  1987. if (chk->holds_key_ref)
  1988. sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
  1989. sctp_free_remote_addr(chk->whoTo);
  1990. SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
  1991. SCTP_DECR_CHK_COUNT();
  1992. }
  1993. TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) {
  1994. TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
  1995. SCTP_FREE(aparam,SCTP_M_ASC_ADDR);
  1996. }
  1997. TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) {
  1998. TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
  1999. if (aack->data != NULL) {
  2000. sctp_m_freem(aack->data);
  2001. }
  2002. SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
  2003. }
  2004. /* process the INIT-ACK info (my info) */
  2005. asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
  2006. asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
  2007. /* pull from vtag hash */
  2008. LIST_REMOVE(stcb, sctp_asocs);
  2009. /* re-insert to new vtag position */
  2010. head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
  2011. SCTP_BASE_INFO(hashasocmark))];
  2012. /*
  2013. * put it in the bucket in the vtag hash of assoc's for the
  2014. * system
  2015. */
  2016. LIST_INSERT_HEAD(head, stcb, sctp_asocs);
  2017. SCTP_INP_WUNLOCK(stcb->sctp_ep);
  2018. SCTP_INP_INFO_WUNLOCK();
  2019. #if defined(__APPLE__) && !defined(__Userspace__)
  2020. SCTP_SOCKET_UNLOCK(so, 1);
  2021. #endif
  2022. asoc->total_flight = 0;
  2023. asoc->total_flight_count = 0;
  2024. /* process the INIT info (peer's info) */
  2025. if (sctp_process_init(init_cp, stcb) < 0) {
  2026. if (how_indx < sizeof(asoc->cookie_how))
  2027. asoc->cookie_how[how_indx] = 13;
  2028. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  2029. SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_init() failed\n");
  2030. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  2031. src, dst, sh, op_err,
  2032. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2033. mflowtype, mflowid,
  2034. #endif
  2035. vrf_id, net->port);
  2036. return (NULL);
  2037. }
  2038. /*
  2039. * since we did not send a HB make sure we don't double
  2040. * things
  2041. */
  2042. net->hb_responded = 1;
  2043. if ((retval = sctp_load_addresses_from_init(stcb, m,
  2044. init_offset + sizeof(struct sctp_init_chunk),
  2045. initack_offset, src, dst, init_src, stcb->asoc.port)) < 0) {
  2046. if (how_indx < sizeof(asoc->cookie_how))
  2047. asoc->cookie_how[how_indx] = 14;
  2048. op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
  2049. "Problem with address parameters");
  2050. SCTPDBG(SCTP_DEBUG_INPUT1,
  2051. "Load addresses from INIT causes an abort %d\n",
  2052. retval);
  2053. sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
  2054. src, dst, sh, op_err,
  2055. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2056. mflowtype, mflowid,
  2057. #endif
  2058. vrf_id, net->port);
  2059. return (NULL);
  2060. }
  2061. /* respond with a COOKIE-ACK */
  2062. sctp_send_cookie_ack(stcb);
  2063. if (how_indx < sizeof(asoc->cookie_how))
  2064. asoc->cookie_how[how_indx] = 15;
  2065. if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE) &&
  2066. (asoc->sctp_autoclose_ticks > 0)) {
  2067. sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
  2068. }
  2069. return (stcb);
  2070. }
  2071. if (how_indx < sizeof(asoc->cookie_how))
  2072. asoc->cookie_how[how_indx] = 16;
  2073. /* all other cases... */
  2074. SCTP_TCB_UNLOCK(stcb);
  2075. return (NULL);
  2076. }
2077. /*
2078. * Handle a state cookie for a new association.
2079. * m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
2080. *    chunk (note: this is a "split" mbuf and the cookie signature does not exist).
2081. * offset: offset into the mbuf to the cookie-echo chunk.
2082. * length: length of the cookie chunk. to: where the INIT was from. Returns a new TCB.
2083. */
  2084. static struct sctp_tcb *
  2085. sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
  2086. struct sockaddr *src, struct sockaddr *dst,
  2087. struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
  2088. struct sctp_inpcb *inp, struct sctp_nets **netp,
  2089. struct sockaddr *init_src, int *notification,
  2090. int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
  2091. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2092. uint8_t mflowtype, uint32_t mflowid,
  2093. #endif
  2094. uint32_t vrf_id, uint16_t port)
  2095. {
  2096. struct sctp_tcb *stcb;
  2097. struct sctp_init_chunk *init_cp, init_buf;
  2098. struct sctp_init_ack_chunk *initack_cp, initack_buf;
  2099. union sctp_sockstore store;
  2100. struct sctp_association *asoc;
  2101. int init_offset, initack_offset, initack_limit;
  2102. int error = 0;
  2103. uint8_t auth_chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
  2104. #if defined(__APPLE__) && !defined(__Userspace__)
  2105. struct socket *so;
  2106. so = SCTP_INP_SO(inp);
  2107. #endif
  2108. /*
  2109. * find and validate the INIT chunk in the cookie (peer's info) the
  2110. * INIT should start after the cookie-echo header struct (chunk
  2111. * header, state cookie header struct)
  2112. */
  2113. init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
  2114. init_cp = (struct sctp_init_chunk *)
  2115. sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
  2116. (uint8_t *) & init_buf);
  2117. if (init_cp == NULL) {
  2118. /* could not pull a INIT chunk in cookie */
  2119. SCTPDBG(SCTP_DEBUG_INPUT1,
  2120. "process_cookie_new: could not pull INIT chunk hdr\n");
  2121. return (NULL);
  2122. }
  2123. if (init_cp->ch.chunk_type != SCTP_INITIATION) {
  2124. SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
  2125. return (NULL);
  2126. }
  2127. initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
  2128. /*
  2129. * find and validate the INIT-ACK chunk in the cookie (my info) the
  2130. * INIT-ACK follows the INIT chunk
  2131. */
  2132. initack_cp = (struct sctp_init_ack_chunk *)
  2133. sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
  2134. (uint8_t *) & initack_buf);
  2135. if (initack_cp == NULL) {
  2136. /* could not pull INIT-ACK chunk in cookie */
  2137. SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
  2138. return (NULL);
  2139. }
  2140. if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
  2141. return (NULL);
  2142. }
  2143. /*
  2144. * NOTE: We can't use the INIT_ACK's chk_length to determine the
  2145. * "initack_limit" value. This is because the chk_length field
  2146. * includes the length of the cookie, but the cookie is omitted when
  2147. * the INIT and INIT_ACK are tacked onto the cookie...
  2148. */
  2149. initack_limit = offset + cookie_len;
  2150. /*
  2151. * now that we know the INIT/INIT-ACK are in place, create a new TCB
  2152. * and populate
  2153. */
2154. /*
2155. * Here we do a trick: we pass NULL for the proc/thread argument. We
2156. * do this since, in effect, we only use the p argument when
2157. * the socket is unbound and we must do an implicit bind.
2158. * Since we are getting a cookie, we cannot be unbound.
2159. */
  2160. stcb = sctp_aloc_assoc(inp, init_src, &error,
  2161. ntohl(initack_cp->init.initiate_tag),
  2162. ntohl(initack_cp->init.initial_tsn), vrf_id,
  2163. ntohs(initack_cp->init.num_outbound_streams),
  2164. port,
  2165. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2166. (struct thread *)NULL,
  2167. #elif defined(_WIN32) && !defined(__Userspace__)
  2168. (PKTHREAD)NULL,
  2169. #else
  2170. (struct proc *)NULL,
  2171. #endif
  2172. SCTP_DONT_INITIALIZE_AUTH_PARAMS);
  2173. if (stcb == NULL) {
  2174. struct mbuf *op_err;
  2175. /* memory problem? */
  2176. SCTPDBG(SCTP_DEBUG_INPUT1,
  2177. "process_cookie_new: no room for another TCB!\n");
  2178. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  2179. sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
  2180. src, dst, sh, op_err,
  2181. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2182. mflowtype, mflowid,
  2183. #endif
  2184. vrf_id, port);
  2185. return (NULL);
  2186. }
  2187. asoc = &stcb->asoc;
  2188. /* get scope variables out of cookie */
  2189. asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
  2190. asoc->scope.site_scope = cookie->site_scope;
  2191. asoc->scope.local_scope = cookie->local_scope;
  2192. asoc->scope.loopback_scope = cookie->loopback_scope;
  2193. #if defined(__Userspace__)
  2194. if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
  2195. (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
  2196. (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
  2197. #else
  2198. if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
  2199. (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
  2200. #endif
  2201. struct mbuf *op_err;
  2202. /*
  2203. * Houston we have a problem. The EP changed while the
  2204. * cookie was in flight. Only recourse is to abort the
  2205. * association.
  2206. */
  2207. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  2208. sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
  2209. src, dst, sh, op_err,
  2210. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2211. mflowtype, mflowid,
  2212. #endif
  2213. vrf_id, port);
  2214. #if defined(__APPLE__) && !defined(__Userspace__)
  2215. atomic_add_int(&stcb->asoc.refcnt, 1);
  2216. SCTP_TCB_UNLOCK(stcb);
  2217. SCTP_SOCKET_LOCK(so, 1);
  2218. SCTP_TCB_LOCK(stcb);
  2219. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2220. #endif
  2221. (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
  2222. SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
  2223. #if defined(__APPLE__) && !defined(__Userspace__)
  2224. SCTP_SOCKET_UNLOCK(so, 1);
  2225. #endif
  2226. return (NULL);
  2227. }
  2228. /* process the INIT-ACK info (my info) */
  2229. asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
  2230. /* process the INIT info (peer's info) */
  2231. if (sctp_process_init(init_cp, stcb) < 0) {
  2232. #if defined(__APPLE__) && !defined(__Userspace__)
  2233. atomic_add_int(&stcb->asoc.refcnt, 1);
  2234. SCTP_TCB_UNLOCK(stcb);
  2235. SCTP_SOCKET_LOCK(so, 1);
  2236. SCTP_TCB_LOCK(stcb);
  2237. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2238. #endif
  2239. (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
  2240. SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
  2241. #if defined(__APPLE__) && !defined(__Userspace__)
  2242. SCTP_SOCKET_UNLOCK(so, 1);
  2243. #endif
  2244. return (NULL);
  2245. }
  2246. /* load all addresses */
  2247. if (sctp_load_addresses_from_init(stcb, m,
  2248. init_offset + sizeof(struct sctp_init_chunk),
  2249. initack_offset, src, dst, init_src, port) < 0) {
  2250. #if defined(__APPLE__) && !defined(__Userspace__)
  2251. atomic_add_int(&stcb->asoc.refcnt, 1);
  2252. SCTP_TCB_UNLOCK(stcb);
  2253. SCTP_SOCKET_LOCK(so, 1);
  2254. SCTP_TCB_LOCK(stcb);
  2255. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2256. #endif
  2257. (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
  2258. SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
  2259. #if defined(__APPLE__) && !defined(__Userspace__)
  2260. SCTP_SOCKET_UNLOCK(so, 1);
  2261. #endif
  2262. return (NULL);
  2263. }
  2264. /*
  2265. * verify any preceding AUTH chunk that was skipped
  2266. */
  2267. /* pull the local authentication parameters from the cookie/init-ack */
  2268. sctp_auth_get_cookie_params(stcb, m,
  2269. initack_offset + sizeof(struct sctp_init_ack_chunk),
  2270. initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
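/*
 * The state cookie embeds the INIT-ACK we sent, including the local AUTH
 * parameters (random, HMAC algorithms, chunk list); the call above restores
 * them into the new stcb so that an AUTH chunk that preceded this
 * COOKIE-ECHO can be verified below.
 */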
  2271. if (auth_skipped) {
  2272. struct sctp_auth_chunk *auth;
  2273. if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
  2274. auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
  2275. } else {
  2276. auth = NULL;
  2277. }
  2278. if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
  2279. /* auth HMAC failed, dump the assoc and packet */
  2280. SCTPDBG(SCTP_DEBUG_AUTH1,
  2281. "COOKIE-ECHO: AUTH failed\n");
  2282. #if defined(__APPLE__) && !defined(__Userspace__)
  2283. atomic_add_int(&stcb->asoc.refcnt, 1);
  2284. SCTP_TCB_UNLOCK(stcb);
  2285. SCTP_SOCKET_LOCK(so, 1);
  2286. SCTP_TCB_LOCK(stcb);
  2287. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2288. #endif
  2289. (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
  2290. SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
  2291. #if defined(__APPLE__) && !defined(__Userspace__)
  2292. SCTP_SOCKET_UNLOCK(so, 1);
  2293. #endif
  2294. return (NULL);
  2295. } else {
  2296. /* remaining chunks checked... good to go */
  2297. stcb->asoc.authenticated = 1;
  2298. }
  2299. }
  2300. /*
  2301. * if we're doing ASCONFs, check to see if we have any new local
2302. * addresses that need to get added to the peer (e.g. addresses
2303. * changed while the cookie echo was in flight). This needs to be done
2304. * after we go to the OPEN state to do the correct asconf
2305. * processing. Otherwise, make sure we have the correct addresses in
2306. * our lists.
  2307. */
  2308. /* warning, we re-use sin, sin6, sa_store here! */
  2309. /* pull in local_address (our "from" address) */
  2310. switch (cookie->laddr_type) {
  2311. #ifdef INET
  2312. case SCTP_IPV4_ADDRESS:
  2313. /* source addr is IPv4 */
  2314. memset(&store.sin, 0, sizeof(struct sockaddr_in));
  2315. store.sin.sin_family = AF_INET;
  2316. #ifdef HAVE_SIN_LEN
  2317. store.sin.sin_len = sizeof(struct sockaddr_in);
  2318. #endif
  2319. store.sin.sin_addr.s_addr = cookie->laddress[0];
  2320. break;
  2321. #endif
  2322. #ifdef INET6
  2323. case SCTP_IPV6_ADDRESS:
  2324. /* source addr is IPv6 */
  2325. memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
  2326. store.sin6.sin6_family = AF_INET6;
  2327. #ifdef HAVE_SIN6_LEN
  2328. store.sin6.sin6_len = sizeof(struct sockaddr_in6);
  2329. #endif
  2330. store.sin6.sin6_scope_id = cookie->scope_id;
  2331. memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
  2332. break;
  2333. #endif
  2334. #if defined(__Userspace__)
  2335. case SCTP_CONN_ADDRESS:
  2336. /* source addr is conn */
  2337. memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
  2338. store.sconn.sconn_family = AF_CONN;
  2339. #ifdef HAVE_SCONN_LEN
  2340. store.sconn.sconn_len = sizeof(struct sockaddr_conn);
  2341. #endif
  2342. memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
  2343. break;
  2344. #endif
  2345. default:
  2346. #if defined(__APPLE__) && !defined(__Userspace__)
  2347. atomic_add_int(&stcb->asoc.refcnt, 1);
  2348. SCTP_TCB_UNLOCK(stcb);
  2349. SCTP_SOCKET_LOCK(so, 1);
  2350. SCTP_TCB_LOCK(stcb);
  2351. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2352. #endif
  2353. (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
  2354. SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
  2355. #if defined(__APPLE__) && !defined(__Userspace__)
  2356. SCTP_SOCKET_UNLOCK(so, 1);
  2357. #endif
  2358. return (NULL);
  2359. }
  2360. /* update current state */
  2361. SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
  2362. SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
  2363. if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
  2364. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
  2365. stcb->sctp_ep, stcb, NULL);
  2366. }
  2367. sctp_stop_all_cookie_timers(stcb);
  2368. SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
  2369. SCTP_STAT_INCR_GAUGE32(sctps_currestab);
  2370. /* set up to notify upper layer */
  2371. *notification = SCTP_NOTIFY_ASSOC_UP;
  2372. if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  2373. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  2374. (!SCTP_IS_LISTENING(inp))) {
  2375. /*
2376. * This is an endpoint that called connect(); how it got a
2377. * cookie that is NEW is a bit of a mystery. It must be that
2378. * the INIT was sent, but before it got there a complete
2379. * INIT/INIT-ACK/COOKIE exchange arrived. Of course it then
2380. * should have gone to the other code path, not here, but a
2381. * bit of protection is worth having.
  2382. *
  2383. * XXXMJ unlocked
  2384. */
  2385. sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
  2386. #if defined(__APPLE__) && !defined(__Userspace__)
  2387. atomic_add_int(&stcb->asoc.refcnt, 1);
  2388. SCTP_TCB_UNLOCK(stcb);
  2389. SCTP_SOCKET_LOCK(so, 1);
  2390. SCTP_TCB_LOCK(stcb);
  2391. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2392. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  2393. SCTP_SOCKET_UNLOCK(so, 1);
  2394. return (NULL);
  2395. }
  2396. #endif
  2397. soisconnected(stcb->sctp_socket);
  2398. #if defined(__APPLE__) && !defined(__Userspace__)
  2399. SCTP_SOCKET_UNLOCK(so, 1);
  2400. #endif
  2401. } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
  2402. (SCTP_IS_LISTENING(inp))) {
  2403. /*
2404. * We don't want to do anything with this one, since it is
2405. * the listening endpoint. The timer will get started for
2406. * accepted connections in the caller.
  2407. */
  2408. ;
  2409. }
  2410. if (stcb->asoc.sctp_autoclose_ticks &&
  2411. sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
  2412. sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
  2413. }
  2414. (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
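/*
 * The RTT sample taken below uses the timestamp stored in the state cookie
 * (cookie->time_entered, set when the cookie/INIT-ACK was built), so it
 * covers roughly one INIT-ACK -> COOKIE-ECHO round trip and seeds the RTO
 * for this path before any DATA has been exchanged.
 */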
  2415. *netp = sctp_findnet(stcb, init_src);
  2416. if (*netp != NULL) {
  2417. struct timeval old;
  2418. /*
  2419. * Since we did not send a HB, make sure we don't double
  2420. * things.
  2421. */
  2422. (*netp)->hb_responded = 1;
  2423. /* Calculate the RTT. */
  2424. old.tv_sec = cookie->time_entered.tv_sec;
  2425. old.tv_usec = cookie->time_entered.tv_usec;
  2426. sctp_calculate_rto(stcb, asoc, *netp, &old, SCTP_RTT_FROM_NON_DATA);
  2427. }
  2428. /* respond with a COOKIE-ACK */
  2429. sctp_send_cookie_ack(stcb);
  2430. /*
  2431. * check the address lists for any ASCONFs that need to be sent
  2432. * AFTER the cookie-ack is sent
  2433. */
  2434. sctp_check_address_list(stcb, m,
  2435. initack_offset + sizeof(struct sctp_init_ack_chunk),
  2436. initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
  2437. &store.sa, cookie->local_scope, cookie->site_scope,
  2438. cookie->ipv4_scope, cookie->loopback_scope);
  2439. return (stcb);
  2440. }
  2441. /*
2442. * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.,
2443. * we NEED to make sure we are not already using the vtag. If so, we
2444. * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG (no middle box bit!).
  2445. head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
  2446. SCTP_BASE_INFO(hashasocmark))];
  2447. LIST_FOREACH(stcb, head, sctp_asocs) {
  2448. if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) {
  2449. -- SEND ABORT - TRY AGAIN --
  2450. }
  2451. }
  2452. */
  2453. /*
  2454. * handles a COOKIE-ECHO message stcb: modified to either a new or left as
  2455. * existing (non-NULL) TCB
  2456. */
  2457. static struct mbuf *
  2458. sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
  2459. struct sockaddr *src, struct sockaddr *dst,
  2460. struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
  2461. struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
  2462. int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
  2463. struct sctp_tcb **locked_tcb,
  2464. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2465. uint8_t mflowtype, uint32_t mflowid,
  2466. #endif
  2467. uint32_t vrf_id, uint16_t port)
  2468. {
  2469. struct sctp_state_cookie *cookie;
  2470. struct sctp_tcb *l_stcb = *stcb;
  2471. struct sctp_inpcb *l_inp;
  2472. struct sockaddr *to;
  2473. struct sctp_pcb *ep;
  2474. struct mbuf *m_sig;
  2475. uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
  2476. uint8_t *sig;
  2477. #if defined(__Userspace__) && defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION)
  2478. uint8_t cookie_ok = 1;
  2479. #else
  2480. uint8_t cookie_ok = 0;
  2481. #endif
  2482. unsigned int sig_offset, cookie_offset;
  2483. unsigned int cookie_len;
  2484. struct timeval now;
  2485. struct timeval time_entered, time_expires;
  2486. int notification = 0;
  2487. struct sctp_nets *netl;
  2488. int had_a_existing_tcb = 0;
  2489. int send_int_conf = 0;
  2490. #ifdef INET
  2491. struct sockaddr_in sin;
  2492. #endif
  2493. #ifdef INET6
  2494. struct sockaddr_in6 sin6;
  2495. #endif
  2496. #if defined(__Userspace__)
  2497. struct sockaddr_conn sconn;
  2498. #endif
  2499. SCTPDBG(SCTP_DEBUG_INPUT2,
  2500. "sctp_handle_cookie: handling COOKIE-ECHO\n");
  2501. if (inp_p == NULL) {
  2502. return (NULL);
  2503. }
  2504. cookie = &cp->cookie;
  2505. cookie_offset = offset + sizeof(struct sctp_chunkhdr);
  2506. cookie_len = ntohs(cp->ch.chunk_length);
  2507. if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
  2508. sizeof(struct sctp_init_chunk) +
  2509. sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
  2510. /* cookie too small */
  2511. return (NULL);
  2512. }
  2513. if ((cookie->peerport != sh->src_port) ||
  2514. (cookie->myport != sh->dest_port) ||
  2515. (cookie->my_vtag != sh->v_tag)) {
  2516. /*
  2517. * invalid ports or bad tag. Note that we always leave the
  2518. * v_tag in the header in network order and when we stored
  2519. * it in the my_vtag slot we also left it in network order.
  2520. * This maintains the match even though it may be in the
  2521. * opposite byte order of the machine :->
  2522. */
  2523. return (NULL);
  2524. }
  2525. #if defined(__Userspace__)
  2526. /*
  2527. * Recover the AF_CONN addresses within the cookie.
  2528. * This needs to be done in the buffer provided for later processing
  2529. * of the cookie and in the mbuf chain for HMAC validation.
  2530. */
  2531. if ((cookie->addr_type == SCTP_CONN_ADDRESS) && (src->sa_family == AF_CONN)) {
  2532. struct sockaddr_conn *sconnp = (struct sockaddr_conn *)src;
  2533. memcpy(cookie->address, &sconnp->sconn_addr , sizeof(void *));
  2534. m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, address),
  2535. (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
  2536. }
  2537. if ((cookie->laddr_type == SCTP_CONN_ADDRESS) && (dst->sa_family == AF_CONN)) {
  2538. struct sockaddr_conn *sconnp = (struct sockaddr_conn *)dst;
  2539. memcpy(cookie->laddress, &sconnp->sconn_addr , sizeof(void *));
  2540. m_copyback(m, cookie_offset + offsetof(struct sctp_state_cookie, laddress),
  2541. (int)sizeof(void *), (caddr_t)&sconnp->sconn_addr);
  2542. }
  2543. #endif
  2544. /*
  2545. * split off the signature into its own mbuf (since it should not be
  2546. * calculated in the sctp_hmac_m() call).
  2547. */
  2548. sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
  2549. m_sig = m_split(m, sig_offset, M_NOWAIT);
  2550. if (m_sig == NULL) {
  2551. /* out of memory or ?? */
  2552. return (NULL);
  2553. }
  2554. #ifdef SCTP_MBUF_LOGGING
  2555. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
  2556. sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
  2557. }
  2558. #endif
  2559. /*
  2560. * compute the signature/digest for the cookie
  2561. */
  2562. if (l_stcb != NULL) {
  2563. atomic_add_int(&l_stcb->asoc.refcnt, 1);
  2564. SCTP_TCB_UNLOCK(l_stcb);
  2565. }
  2566. l_inp = *inp_p;
  2567. SCTP_INP_RLOCK(l_inp);
  2568. if (l_stcb != NULL) {
  2569. SCTP_TCB_LOCK(l_stcb);
  2570. atomic_subtract_int(&l_stcb->asoc.refcnt, 1);
  2571. }
  2572. if (l_inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
  2573. SCTP_INP_RUNLOCK(l_inp);
  2574. sctp_m_freem(m_sig);
  2575. return (NULL);
  2576. }
  2577. ep = &(*inp_p)->sctp_ep;
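/*
 * The endpoint rotates its cookie secret over time.  Pick the secret that
 * was in effect when this cookie was stamped: a cookie older than the last
 * secret change (while a previous secret still exists) is verified with the
 * old secret, anything else with the current one.  The mismatch path below
 * re-checks cookies stamped right at the change time against the old secret
 * before giving up.
 */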
  2578. /* which cookie is it? */
  2579. if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
  2580. (ep->current_secret_number != ep->last_secret_number)) {
  2581. /* it's the old cookie */
  2582. (void)sctp_hmac_m(SCTP_HMAC,
  2583. (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
  2584. SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
  2585. } else {
  2586. /* it's the current cookie */
  2587. (void)sctp_hmac_m(SCTP_HMAC,
  2588. (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
  2589. SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
  2590. }
  2591. /* get the signature */
  2592. SCTP_INP_RUNLOCK(l_inp);
  2593. sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
  2594. if (sig == NULL) {
  2595. /* couldn't find signature */
  2596. sctp_m_freem(m_sig);
  2597. return (NULL);
  2598. }
  2599. /* compare the received digest with the computed digest */
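/*
 * timingsafe_bcmp() is used so the comparison takes the same amount of time
 * whether the digests differ in the first or in the last byte, avoiding a
 * timing side channel on cookie validation.
 */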
  2600. if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
  2601. /* try the old cookie? */
  2602. if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
  2603. (ep->current_secret_number != ep->last_secret_number)) {
  2604. /* compute digest with old */
  2605. (void)sctp_hmac_m(SCTP_HMAC,
  2606. (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
  2607. SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
  2608. /* compare */
  2609. if (timingsafe_bcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
  2610. cookie_ok = 1;
  2611. }
  2612. } else {
  2613. cookie_ok = 1;
  2614. }
  2615. /*
  2616. * Now before we continue we must reconstruct our mbuf so that
  2617. * normal processing of any other chunks will work.
  2618. */
  2619. {
  2620. struct mbuf *m_at;
  2621. m_at = m;
  2622. while (SCTP_BUF_NEXT(m_at) != NULL) {
  2623. m_at = SCTP_BUF_NEXT(m_at);
  2624. }
  2625. SCTP_BUF_NEXT(m_at) = m_sig;
  2626. }
  2627. if (cookie_ok == 0) {
  2628. SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
  2629. SCTPDBG(SCTP_DEBUG_INPUT2,
  2630. "offset = %u, cookie_offset = %u, sig_offset = %u\n",
  2631. (uint32_t) offset, cookie_offset, sig_offset);
  2632. return (NULL);
  2633. }
  2634. if (sctp_ticks_to_msecs(cookie->cookie_life) > SCTP_MAX_COOKIE_LIFE) {
  2635. SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid cookie lifetime\n");
  2636. return (NULL);
  2637. }
  2638. time_entered.tv_sec = cookie->time_entered.tv_sec;
  2639. time_entered.tv_usec = cookie->time_entered.tv_usec;
  2640. if ((time_entered.tv_sec < 0) ||
  2641. (time_entered.tv_usec < 0) ||
  2642. (time_entered.tv_usec >= 1000000)) {
  2643. /* Invalid time stamp. Cookie must have been modified. */
  2644. SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: Invalid time stamp\n");
  2645. return (NULL);
  2646. }
  2647. (void)SCTP_GETTIME_TIMEVAL(&now);
  2648. #if !(defined(__FreeBSD__) && !defined(__Userspace__))
  2649. if (timercmp(&now, &time_entered, <)) {
  2650. #else
  2651. if (timevalcmp(&now, &time_entered, <)) {
  2652. #endif
  2653. SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie generated in the future!\n");
  2654. return (NULL);
  2655. }
  2656. /*
  2657. * Check the cookie timestamps to be sure it's not stale.
  2658. * cookie_life is in ticks, so we convert to seconds.
  2659. */
  2660. time_expires.tv_sec = time_entered.tv_sec + sctp_ticks_to_secs(cookie->cookie_life);
  2661. time_expires.tv_usec = time_entered.tv_usec;
  2662. #if !(defined(__FreeBSD__) && !defined(__Userspace__))
  2663. if (timercmp(&now, &time_expires, >))
  2664. #else
  2665. if (timevalcmp(&now, &time_expires, >))
  2666. #endif
  2667. {
  2668. /* cookie is stale! */
  2669. struct mbuf *op_err;
  2670. struct sctp_error_stale_cookie *cause;
  2671. struct timeval diff;
  2672. uint32_t staleness;
  2673. op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
  2674. 0, M_NOWAIT, 1, MT_DATA);
  2675. if (op_err == NULL) {
  2676. /* FOOBAR */
  2677. return (NULL);
  2678. }
  2679. /* Set the len */
  2680. SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
  2681. cause = mtod(op_err, struct sctp_error_stale_cookie *);
  2682. cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
  2683. cause->cause.length = htons(sizeof(struct sctp_error_stale_cookie));
  2684. #if !(defined(__FreeBSD__) && !defined(__Userspace__))
  2685. timersub(&now, &time_expires, &diff);
  2686. #else
  2687. diff = now;
  2688. timevalsub(&diff, &time_expires);
  2689. #endif
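/*
 * Convert how far past its lifetime the cookie is into microseconds for the
 * "Measure of Staleness" field of the Stale Cookie error, saturating at
 * UINT32_MAX instead of overflowing.  For example (illustrative numbers
 * only): a cookie that expired 3.5 seconds ago yields
 * 3 * 1000000 + 500000 = 3500000, while anything older than roughly
 * 4295 seconds reports UINT32_MAX.
 */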
  2690. if ((uint32_t)diff.tv_sec > UINT32_MAX / 1000000) {
  2691. staleness = UINT32_MAX;
  2692. } else {
  2693. staleness = (uint32_t)diff.tv_sec * 1000000;
  2694. }
  2695. if (UINT32_MAX - staleness >= (uint32_t)diff.tv_usec) {
  2696. staleness += (uint32_t)diff.tv_usec;
  2697. } else {
  2698. staleness = UINT32_MAX;
  2699. }
  2700. cause->stale_time = htonl(staleness);
  2701. sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
  2702. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2703. mflowtype, mflowid, l_inp->fibnum,
  2704. #endif
  2705. vrf_id, port);
  2706. return (NULL);
  2707. }
  2708. /*
2709. * Now we must use the lookup address to see if we have an existing
2710. * asoc. This will only happen if we were in the COOKIE-WAIT state
2711. * and an INIT collided with us and the peer sent the
2712. * cookie from another address besides the single address our assoc
2713. * had for it. In this case we will have at least one of the tie-tags
2714. * set AND the address field in the cookie can be used to look it
2715. * up.
  2716. */
  2717. to = NULL;
  2718. switch (cookie->addr_type) {
  2719. #ifdef INET6
  2720. case SCTP_IPV6_ADDRESS:
  2721. memset(&sin6, 0, sizeof(sin6));
  2722. sin6.sin6_family = AF_INET6;
  2723. #ifdef HAVE_SIN6_LEN
  2724. sin6.sin6_len = sizeof(sin6);
  2725. #endif
  2726. sin6.sin6_port = sh->src_port;
  2727. sin6.sin6_scope_id = cookie->scope_id;
  2728. memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
  2729. sizeof(sin6.sin6_addr.s6_addr));
  2730. to = (struct sockaddr *)&sin6;
  2731. break;
  2732. #endif
  2733. #ifdef INET
  2734. case SCTP_IPV4_ADDRESS:
  2735. memset(&sin, 0, sizeof(sin));
  2736. sin.sin_family = AF_INET;
  2737. #ifdef HAVE_SIN_LEN
  2738. sin.sin_len = sizeof(sin);
  2739. #endif
  2740. sin.sin_port = sh->src_port;
  2741. sin.sin_addr.s_addr = cookie->address[0];
  2742. to = (struct sockaddr *)&sin;
  2743. break;
  2744. #endif
  2745. #if defined(__Userspace__)
  2746. case SCTP_CONN_ADDRESS:
  2747. memset(&sconn, 0, sizeof(struct sockaddr_conn));
  2748. sconn.sconn_family = AF_CONN;
  2749. #ifdef HAVE_SCONN_LEN
  2750. sconn.sconn_len = sizeof(struct sockaddr_conn);
  2751. #endif
  2752. sconn.sconn_port = sh->src_port;
  2753. memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
  2754. to = (struct sockaddr *)&sconn;
  2755. break;
  2756. #endif
  2757. default:
  2758. /* This should not happen */
  2759. return (NULL);
  2760. }
  2761. if (*stcb == NULL) {
  2762. /* Yep, lets check */
  2763. *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
  2764. if (*stcb == NULL) {
  2765. /*
2766. * We should have only got back the same inp. If we
2767. * got back a different ep we have a problem. The
2768. * original findep got back l_inp, yet now we have another one.
  2769. */
  2770. if (l_inp != *inp_p) {
  2771. SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
  2772. }
  2773. } else {
  2774. if (*locked_tcb == NULL) {
  2775. /* In this case we found the assoc only
  2776. * after we locked the create lock. This means
  2777. * we are in a colliding case and we must make
2778. * sure that we unlock the tcb if it is one of the
  2779. * cases where we throw away the incoming packets.
  2780. */
  2781. *locked_tcb = *stcb;
  2782. /* We must also increment the inp ref count
2783. * since the ref_count flag was set when we
2784. * did not find the TCB; now we found it, which
2785. * reduces the refcount, so we must raise it back
2786. * up to balance it all :-)
  2787. */
  2788. SCTP_INP_INCR_REF((*stcb)->sctp_ep);
  2789. if ((*stcb)->sctp_ep != l_inp) {
  2790. SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
  2791. (void *)(*stcb)->sctp_ep, (void *)l_inp);
  2792. }
  2793. }
  2794. }
  2795. }
  2796. cookie_len -= SCTP_SIGNATURE_SIZE;
  2797. if (*stcb == NULL) {
  2798. /* this is the "normal" case... get a new TCB */
  2799. *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
  2800. cookie, cookie_len, *inp_p,
  2801. netp, to, &notification,
  2802. auth_skipped, auth_offset, auth_len,
  2803. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2804. mflowtype, mflowid,
  2805. #endif
  2806. vrf_id, port);
  2807. } else {
  2808. /* this is abnormal... cookie-echo on existing TCB */
  2809. had_a_existing_tcb = 1;
  2810. *stcb = sctp_process_cookie_existing(m, iphlen, offset,
  2811. src, dst, sh,
  2812. cookie, cookie_len, *inp_p, *stcb, netp, to,
  2813. &notification, auth_skipped, auth_offset, auth_len,
  2814. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2815. mflowtype, mflowid,
  2816. #endif
  2817. vrf_id, port);
  2818. if (*stcb == NULL) {
  2819. *locked_tcb = NULL;
  2820. }
  2821. }
  2822. if (*stcb == NULL) {
  2823. /* still no TCB... must be bad cookie-echo */
  2824. return (NULL);
  2825. }
  2826. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2827. if (*netp != NULL) {
  2828. (*netp)->flowtype = mflowtype;
  2829. (*netp)->flowid = mflowid;
  2830. }
  2831. #endif
  2832. /*
  2833. * Ok, we built an association so confirm the address we sent the
  2834. * INIT-ACK to.
  2835. */
  2836. netl = sctp_findnet(*stcb, to);
  2837. /*
2838. * This code should in theory NOT run, but handle it if it does.
  2839. */
  2840. if (netl == NULL) {
  2841. /* TSNH! Huh, why do I need to add this address here? */
  2842. if (sctp_add_remote_addr(*stcb, to, NULL, port,
  2843. SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
  2844. return (NULL);
  2845. }
  2846. netl = sctp_findnet(*stcb, to);
  2847. }
  2848. if (netl) {
  2849. if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
  2850. netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
  2851. (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
  2852. netl);
  2853. send_int_conf = 1;
  2854. }
  2855. }
  2856. sctp_start_net_timers(*stcb);
  2857. if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
  2858. if (!had_a_existing_tcb ||
  2859. (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
  2860. /*
  2861. * If we have a NEW cookie or the connect never
  2862. * reached the connected state during collision we
  2863. * must do the TCP accept thing.
  2864. */
  2865. struct socket *so, *oso;
  2866. struct sctp_inpcb *inp;
  2867. if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
  2868. /*
  2869. * For a restart we will keep the same
  2870. * socket, no need to do anything. I THINK!!
  2871. */
  2872. sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  2873. if (send_int_conf) {
  2874. sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
  2875. (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
  2876. }
  2877. return (m);
  2878. }
  2879. oso = (*inp_p)->sctp_socket;
  2880. atomic_add_int(&(*stcb)->asoc.refcnt, 1);
  2881. SCTP_TCB_UNLOCK((*stcb));
  2882. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2883. CURVNET_SET(oso->so_vnet);
  2884. #endif
  2885. #if defined(__APPLE__) && !defined(__Userspace__)
  2886. SCTP_SOCKET_LOCK(oso, 1);
  2887. #endif
  2888. so = sonewconn(oso, 0
  2889. #if defined(__APPLE__) && !defined(__Userspace__)
  2890. ,NULL
  2891. #endif
  2892. );
  2893. #if defined(__APPLE__) && !defined(__Userspace__)
  2894. SCTP_SOCKET_UNLOCK(oso, 1);
  2895. #endif
  2896. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2897. CURVNET_RESTORE();
  2898. #endif
  2899. SCTP_TCB_LOCK((*stcb));
  2900. atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
  2901. if (so == NULL) {
  2902. struct mbuf *op_err;
  2903. #if defined(__APPLE__) && !defined(__Userspace__)
  2904. struct socket *pcb_so;
  2905. #endif
  2906. /* Too many sockets */
  2907. SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
  2908. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  2909. sctp_abort_association(*inp_p, NULL, m, iphlen,
  2910. src, dst, sh, op_err,
  2911. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2912. mflowtype, mflowid,
  2913. #endif
  2914. vrf_id, port);
  2915. #if defined(__APPLE__) && !defined(__Userspace__)
  2916. pcb_so = SCTP_INP_SO(*inp_p);
  2917. atomic_add_int(&(*stcb)->asoc.refcnt, 1);
  2918. SCTP_TCB_UNLOCK((*stcb));
  2919. SCTP_SOCKET_LOCK(pcb_so, 1);
  2920. SCTP_TCB_LOCK((*stcb));
  2921. atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
  2922. #endif
  2923. (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
  2924. SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
  2925. #if defined(__APPLE__) && !defined(__Userspace__)
  2926. SCTP_SOCKET_UNLOCK(pcb_so, 1);
  2927. #endif
  2928. return (NULL);
  2929. }
  2930. inp = (struct sctp_inpcb *)so->so_pcb;
  2931. SCTP_INP_INCR_REF(inp);
  2932. /*
  2933. * We add the unbound flag here so that
  2934. * if we get an soabort() before we get the
2935. * move_pcb done, we will properly clean up.
  2936. */
  2937. inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
  2938. SCTP_PCB_FLAGS_CONNECTED |
  2939. SCTP_PCB_FLAGS_IN_TCPPOOL |
  2940. SCTP_PCB_FLAGS_UNBOUND |
  2941. (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
  2942. SCTP_PCB_FLAGS_DONT_WAKE);
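/*
 * The socket created by sonewconn() starts with a fresh inpcb; copy the
 * per-endpoint options from the listening endpoint below so the accepted
 * association behaves as if it had been configured on the original socket.
 */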
  2943. inp->sctp_features = (*inp_p)->sctp_features;
  2944. inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
  2945. inp->sctp_socket = so;
  2946. inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
  2947. inp->max_cwnd = (*inp_p)->max_cwnd;
  2948. inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
  2949. inp->ecn_supported = (*inp_p)->ecn_supported;
  2950. inp->prsctp_supported = (*inp_p)->prsctp_supported;
  2951. inp->auth_supported = (*inp_p)->auth_supported;
  2952. inp->asconf_supported = (*inp_p)->asconf_supported;
  2953. inp->reconfig_supported = (*inp_p)->reconfig_supported;
  2954. inp->nrsack_supported = (*inp_p)->nrsack_supported;
  2955. inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
  2956. inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
  2957. inp->sctp_context = (*inp_p)->sctp_context;
  2958. inp->local_strreset_support = (*inp_p)->local_strreset_support;
  2959. inp->fibnum = (*inp_p)->fibnum;
  2960. #if defined(__Userspace__)
  2961. inp->ulp_info = (*inp_p)->ulp_info;
  2962. inp->recv_callback = (*inp_p)->recv_callback;
  2963. inp->send_callback = (*inp_p)->send_callback;
  2964. inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
  2965. #endif
  2966. /*
  2967. * copy in the authentication parameters from the
  2968. * original endpoint
  2969. */
  2970. if (inp->sctp_ep.local_hmacs)
  2971. sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
  2972. inp->sctp_ep.local_hmacs =
  2973. sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
  2974. if (inp->sctp_ep.local_auth_chunks)
  2975. sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
  2976. inp->sctp_ep.local_auth_chunks =
  2977. sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
  2978. /*
  2979. * Now we must move it from one hash table to
  2980. * another and get the tcb in the right place.
  2981. */
  2982. /* This is where the one-2-one socket is put into
  2983. * the accept state waiting for the accept!
  2984. */
  2985. if (*stcb) {
  2986. SCTP_ADD_SUBSTATE(*stcb, SCTP_STATE_IN_ACCEPT_QUEUE);
  2987. }
  2988. sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
  2989. atomic_add_int(&(*stcb)->asoc.refcnt, 1);
  2990. SCTP_TCB_UNLOCK((*stcb));
  2991. #if defined(__FreeBSD__) && !defined(__Userspace__)
  2992. sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
  2993. 0);
  2994. #else
  2995. sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
  2996. #endif
  2997. SCTP_TCB_LOCK((*stcb));
  2998. atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
  2999. /* now we must check to see if we were aborted while
  3000. * the move was going on and the lock/unlock happened.
  3001. */
  3002. if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  3003. /* yep it was, we leave the
  3004. * assoc attached to the socket since
  3005. * the sctp_inpcb_free() call will send
  3006. * an abort for us.
  3007. */
  3008. SCTP_INP_DECR_REF(inp);
  3009. return (NULL);
  3010. }
  3011. SCTP_INP_DECR_REF(inp);
  3012. /* Switch over to the new guy */
  3013. *inp_p = inp;
  3014. sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  3015. if (send_int_conf) {
  3016. sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
  3017. (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
  3018. }
  3019. /* Pull it from the incomplete queue and wake the guy */
  3020. #if defined(__APPLE__) && !defined(__Userspace__)
  3021. atomic_add_int(&(*stcb)->asoc.refcnt, 1);
  3022. SCTP_TCB_UNLOCK((*stcb));
  3023. SCTP_SOCKET_LOCK(so, 1);
  3024. #endif
  3025. soisconnected(so);
  3026. #if defined(__APPLE__) && !defined(__Userspace__)
  3027. SCTP_TCB_LOCK((*stcb));
  3028. atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
  3029. SCTP_SOCKET_UNLOCK(so, 1);
  3030. #endif
  3031. return (m);
  3032. }
  3033. }
  3034. if (notification) {
  3035. sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  3036. }
  3037. if (send_int_conf) {
  3038. sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
  3039. (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
  3040. }
  3041. return (m);
  3042. }
  3043. static void
  3044. sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
  3045. struct sctp_tcb *stcb, struct sctp_nets *net)
  3046. {
  3047. /* cp must not be used, others call this without a c-ack :-) */
  3048. struct sctp_association *asoc;
  3049. struct sctp_tmit_chunk *chk;
  3050. SCTPDBG(SCTP_DEBUG_INPUT2,
  3051. "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
  3052. if ((stcb == NULL) || (net == NULL)) {
  3053. return;
  3054. }
  3055. asoc = &stcb->asoc;
  3056. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
  3057. sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
  3058. asoc->overall_error_count,
  3059. 0,
  3060. SCTP_FROM_SCTP_INPUT,
  3061. __LINE__);
  3062. }
  3063. asoc->overall_error_count = 0;
  3064. sctp_stop_all_cookie_timers(stcb);
  3065. /* process according to association state */
  3066. if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) {
  3067. /* state change only needed when I am in right state */
  3068. SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
  3069. SCTP_SET_STATE(stcb, SCTP_STATE_OPEN);
  3070. sctp_start_net_timers(stcb);
  3071. if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
  3072. sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
  3073. stcb->sctp_ep, stcb, NULL);
  3074. }
  3075. /* update RTO */
  3076. SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
  3077. SCTP_STAT_INCR_GAUGE32(sctps_currestab);
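/*
 * Take a non-DATA RTT sample for the handshake: the interval from
 * asoc->time_entered (recorded at the previous state transition, so roughly
 * when the COOKIE-ECHO went out) until this COOKIE-ACK arrived is fed into
 * the RTO calculation below.
 */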
  3078. if (asoc->overall_error_count == 0) {
  3079. sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered,
  3080. SCTP_RTT_FROM_NON_DATA);
  3081. }
  3082. (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
  3083. sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  3084. if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  3085. (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
  3086. #if defined(__APPLE__) && !defined(__Userspace__)
  3087. struct socket *so;
  3088. #endif
  3089. sctp_pcb_add_flags(stcb->sctp_ep, SCTP_PCB_FLAGS_CONNECTED);
  3090. #if defined(__APPLE__) && !defined(__Userspace__)
  3091. so = SCTP_INP_SO(stcb->sctp_ep);
  3092. atomic_add_int(&stcb->asoc.refcnt, 1);
  3093. SCTP_TCB_UNLOCK(stcb);
  3094. SCTP_SOCKET_LOCK(so, 1);
  3095. SCTP_TCB_LOCK(stcb);
  3096. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  3097. #endif
  3098. if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
  3099. soisconnected(stcb->sctp_socket);
  3100. }
  3101. #if defined(__APPLE__) && !defined(__Userspace__)
  3102. SCTP_SOCKET_UNLOCK(so, 1);
  3103. #endif
  3104. }
  3105. /*
  3106. * since we did not send a HB make sure we don't double
  3107. * things
  3108. */
  3109. net->hb_responded = 1;
  3110. if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  3111. /* We don't need to do the asconf thing,
  3112. * nor hb or autoclose if the socket is closed.
  3113. */
  3114. goto closed_socket;
  3115. }
  3116. sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
  3117. stcb, net);
  3118. if (stcb->asoc.sctp_autoclose_ticks &&
  3119. sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
  3120. sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
  3121. stcb->sctp_ep, stcb, NULL);
  3122. }
  3123. /*
  3124. * send ASCONF if parameters are pending and ASCONFs are
3125. * allowed (e.g. addresses changed when init/cookie echo were
  3126. * in flight)
  3127. */
  3128. if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
  3129. (stcb->asoc.asconf_supported == 1) &&
  3130. (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
  3131. #ifdef SCTP_TIMER_BASED_ASCONF
  3132. sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
  3133. stcb->sctp_ep, stcb,
  3134. stcb->asoc.primary_destination);
  3135. #else
  3136. sctp_send_asconf(stcb, stcb->asoc.primary_destination,
  3137. SCTP_ADDR_NOT_LOCKED);
  3138. #endif
  3139. }
  3140. }
  3141. closed_socket:
  3142. /* Toss the cookie if I can */
  3143. sctp_toss_old_cookies(stcb, asoc);
  3144. /* Restart the timer if we have pending data */
  3145. TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
  3146. if (chk->whoTo != NULL) {
  3147. break;
  3148. }
  3149. }
  3150. if (chk != NULL) {
  3151. sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
  3152. }
  3153. }
  3154. static void
  3155. sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
  3156. struct sctp_tcb *stcb)
  3157. {
  3158. struct sctp_nets *net;
  3159. struct sctp_tmit_chunk *lchk;
  3160. struct sctp_ecne_chunk bkup;
  3161. uint8_t override_bit;
  3162. uint32_t tsn, window_data_tsn;
  3163. int len;
  3164. unsigned int pkt_cnt;
  3165. len = ntohs(cp->ch.chunk_length);
  3166. if (len == sizeof(struct old_sctp_ecne_chunk)) {
  3167. /* Its the old format */
  3168. memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
  3169. bkup.num_pkts_since_cwr = htonl(1);
  3170. cp = &bkup;
  3171. }
  3172. SCTP_STAT_INCR(sctps_recvecne);
  3173. tsn = ntohl(cp->tsn);
  3174. pkt_cnt = ntohl(cp->num_pkts_since_cwr);
  3175. lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
  3176. if (lchk == NULL) {
  3177. window_data_tsn = stcb->asoc.sending_seq - 1;
  3178. } else {
  3179. window_data_tsn = lchk->rec.data.tsn;
  3180. }
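/*
 * window_data_tsn is the highest TSN currently queued for sending (or
 * sending_seq - 1 if the send queue is empty).  Recording it in
 * net->cwr_window_tsn below ensures the cwnd is reduced at most once per
 * window of data, i.e. roughly once per RTT, no matter how many ECN-Echo
 * chunks arrive for TSNs within that window.
 */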
  3181. /* Find where it was sent to if possible. */
  3182. net = NULL;
  3183. TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
  3184. if (lchk->rec.data.tsn == tsn) {
  3185. net = lchk->whoTo;
  3186. net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
  3187. break;
  3188. }
  3189. if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
  3190. break;
  3191. }
  3192. }
  3193. if (net == NULL) {
  3194. /*
3195. * What to do? A previous send of a
3196. * CWR was possibly lost. See how old it is; we
3197. * may have it marked on the actual net.
  3198. */
  3199. TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
  3200. if (tsn == net->last_cwr_tsn) {
  3201. /* Found him, send it off */
  3202. break;
  3203. }
  3204. }
  3205. if (net == NULL) {
  3206. /*
  3207. * If we reach here, we need to send a special
  3208. * CWR that says hey, we did this a long time
  3209. * ago and you lost the response.
  3210. */
  3211. net = TAILQ_FIRST(&stcb->asoc.nets);
  3212. if (net == NULL) {
  3213. /* TSNH */
  3214. return;
  3215. }
  3216. override_bit = SCTP_CWR_REDUCE_OVERRIDE;
  3217. } else {
  3218. override_bit = 0;
  3219. }
  3220. } else {
  3221. override_bit = 0;
  3222. }
  3223. if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
  3224. ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
  3225. /* JRS - Use the congestion control given in the pluggable CC module */
  3226. stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
  3227. /*
  3228. * We reduce once every RTT. So we will only lower cwnd at
  3229. * the next sending seq i.e. the window_data_tsn
  3230. */
  3231. net->cwr_window_tsn = window_data_tsn;
  3232. net->ecn_ce_pkt_cnt += pkt_cnt;
  3233. net->lost_cnt = pkt_cnt;
  3234. net->last_cwr_tsn = tsn;
  3235. } else {
  3236. override_bit |= SCTP_CWR_IN_SAME_WINDOW;
  3237. if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
  3238. ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
  3239. /*
3240. * Another loss in the same window; update how
3241. * many marks/lost packets we have had.
  3242. */
  3243. int cnt = 1;
  3244. if (pkt_cnt > net->lost_cnt) {
  3245. /* Should be the case */
  3246. cnt = (pkt_cnt - net->lost_cnt);
  3247. net->ecn_ce_pkt_cnt += cnt;
  3248. }
  3249. net->lost_cnt = pkt_cnt;
  3250. net->last_cwr_tsn = tsn;
  3251. /*
3252. * Most CC functions will ignore this call, since we are still
3253. * within the window of the initial CE the peer saw.
  3254. */
  3255. stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
  3256. }
  3257. }
  3258. /*
3259. * We always send a CWR this way: if our previous one was lost, our
3260. * peer will get an update, and if it is not yet time to reduce again,
3261. * the peer still gets the CWR. Note we set the override when we
3262. * could not find the TSN on a chunk or the destination network.
  3263. */
  3264. sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
  3265. }
  3266. static void
  3267. sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
  3268. {
  3269. /*
  3270. * Here we get a CWR from the peer. We must look in the outqueue and
  3271. * make sure that we have a covered ECNE in the control chunk part.
  3272. * If so remove it.
  3273. */
  3274. struct sctp_tmit_chunk *chk, *nchk;
  3275. struct sctp_ecne_chunk *ecne;
  3276. int override;
  3277. uint32_t cwr_tsn;
  3278. cwr_tsn = ntohl(cp->tsn);
  3279. override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
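/*
 * A CWR carrying the REDUCE_OVERRIDE flag covers ECN-Echo chunks queued for
 * any destination, so every covered ECNE is removed; without the flag only
 * ECNEs queued to the network the CWR arrived on are considered, and the
 * scan can stop after the first match.
 */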
  3280. TAILQ_FOREACH_SAFE(chk, &stcb->asoc.control_send_queue, sctp_next, nchk) {
  3281. if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
  3282. continue;
  3283. }
  3284. if ((override == 0) && (chk->whoTo != net)) {
  3285. /* Must be from the right src unless override is set */
  3286. continue;
  3287. }
  3288. ecne = mtod(chk->data, struct sctp_ecne_chunk *);
  3289. if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
  3290. /* this covers this ECNE, we can remove it */
  3291. stcb->asoc.ecn_echo_cnt_onq--;
  3292. TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
  3293. sctp_next);
  3294. stcb->asoc.ctrl_queue_cnt--;
  3295. sctp_m_freem(chk->data);
  3296. chk->data = NULL;
  3297. sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  3298. if (override == 0) {
  3299. break;
  3300. }
  3301. }
  3302. }
  3303. }
  3304. static void
  3305. sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
  3306. struct sctp_tcb *stcb, struct sctp_nets *net)
  3307. {
  3308. #if defined(__APPLE__) && !defined(__Userspace__)
  3309. struct socket *so;
  3310. #endif
  3311. SCTPDBG(SCTP_DEBUG_INPUT2,
  3312. "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
  3313. if (stcb == NULL)
  3314. return;
  3315. /* process according to association state */
  3316. if (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
  3317. /* unexpected SHUTDOWN-COMPLETE... so ignore... */
  3318. SCTPDBG(SCTP_DEBUG_INPUT2,
  3319. "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
  3320. SCTP_TCB_UNLOCK(stcb);
  3321. return;
  3322. }
  3323. /* notify upper layer protocol */
  3324. if (stcb->sctp_socket) {
  3325. sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  3326. }
  3327. #ifdef INVARIANTS
  3328. if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
  3329. !TAILQ_EMPTY(&stcb->asoc.sent_queue) ||
  3330. sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
  3331. panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
  3332. }
  3333. #endif
  3334. /* stop the timer */
  3335. sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
  3336. SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
  3337. SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
  3338. /* free the TCB */
  3339. SCTPDBG(SCTP_DEBUG_INPUT2,
  3340. "sctp_handle_shutdown_complete: calls free-asoc\n");
  3341. #if defined(__APPLE__) && !defined(__Userspace__)
  3342. so = SCTP_INP_SO(stcb->sctp_ep);
  3343. atomic_add_int(&stcb->asoc.refcnt, 1);
  3344. SCTP_TCB_UNLOCK(stcb);
  3345. SCTP_SOCKET_LOCK(so, 1);
  3346. SCTP_TCB_LOCK(stcb);
  3347. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  3348. #endif
  3349. (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
  3350. SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
  3351. #if defined(__APPLE__) && !defined(__Userspace__)
  3352. SCTP_SOCKET_UNLOCK(so, 1);
  3353. #endif
  3354. return;
  3355. }
  3356. static int
  3357. process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
  3358. struct sctp_nets *net, uint8_t flg)
  3359. {
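/*
 * Handle one chunk descriptor reported in a PACKET-DROPPED chunk: for
 * DATA/IDATA the reported TSN is located on the sent queue and marked for
 * retransmission (after checking that the reported bytes match what we
 * actually sent), most control chunks are simply sent again, and chunk
 * types we cannot usefully act on are ignored.
 */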
  3360. switch (desc->chunk_type) {
  3361. case SCTP_DATA:
  3362. case SCTP_IDATA:
  3363. /* find the tsn to resend (possibly) */
  3364. {
  3365. uint32_t tsn;
  3366. struct sctp_tmit_chunk *tp1;
  3367. tsn = ntohl(desc->tsn_ifany);
  3368. TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
  3369. if (tp1->rec.data.tsn == tsn) {
  3370. /* found it */
  3371. break;
  3372. }
  3373. if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
  3374. /* not found */
  3375. tp1 = NULL;
  3376. break;
  3377. }
  3378. }
  3379. if (tp1 == NULL) {
  3380. /*
3381. * Do it the other way, i.e., without paying
3382. * attention to queue seq order.
  3383. */
  3384. SCTP_STAT_INCR(sctps_pdrpdnfnd);
  3385. TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
  3386. if (tp1->rec.data.tsn == tsn) {
  3387. /* found it */
  3388. break;
  3389. }
  3390. }
  3391. }
  3392. if (tp1 == NULL) {
  3393. SCTP_STAT_INCR(sctps_pdrptsnnf);
  3394. }
  3395. if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
  3396. if (((flg & SCTP_BADCRC) == 0) &&
  3397. ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
  3398. return (0);
  3399. }
  3400. if ((stcb->asoc.peers_rwnd == 0) &&
  3401. ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
  3402. SCTP_STAT_INCR(sctps_pdrpdiwnp);
  3403. return (0);
  3404. }
  3405. if (stcb->asoc.peers_rwnd == 0 &&
  3406. (flg & SCTP_FROM_MIDDLE_BOX)) {
  3407. SCTP_STAT_INCR(sctps_pdrpdizrw);
  3408. return (0);
  3409. }
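/*
 * Before acting on the drop report, check that the reported chunk really
 * matches something we sent: the descriptor carries the first
 * SCTP_NUM_DB_TO_VERIFY bytes of the dropped DATA chunk, and they must
 * match our copy byte for byte or the report is rejected as bogus.
 */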
  3410. if ((uint32_t)SCTP_BUF_LEN(tp1->data) <
  3411. SCTP_DATA_CHUNK_OVERHEAD(stcb) + SCTP_NUM_DB_TO_VERIFY) {
  3412. /* Payload not matching. */
  3413. SCTP_STAT_INCR(sctps_pdrpbadd);
  3414. return (-1);
  3415. }
  3416. if (memcmp(mtod(tp1->data, caddr_t) + SCTP_DATA_CHUNK_OVERHEAD(stcb),
  3417. desc->data_bytes, SCTP_NUM_DB_TO_VERIFY) != 0) {
  3418. /* Payload not matching. */
  3419. SCTP_STAT_INCR(sctps_pdrpbadd);
  3420. return (-1);
  3421. }
  3422. if (tp1->do_rtt) {
  3423. /*
3424. * this guy had an RTO calculation
  3425. * pending on it, cancel it
  3426. */
  3427. if (tp1->whoTo->rto_needed == 0) {
  3428. tp1->whoTo->rto_needed = 1;
  3429. }
  3430. tp1->do_rtt = 0;
  3431. }
  3432. SCTP_STAT_INCR(sctps_pdrpmark);
  3433. if (tp1->sent != SCTP_DATAGRAM_RESEND)
  3434. sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
  3435. /*
  3436. * mark it as if we were doing a FR, since
  3437. * we will be getting gap ack reports behind
  3438. * the info from the router.
  3439. */
  3440. tp1->rec.data.doing_fast_retransmit = 1;
  3441. /*
  3442. * mark the tsn with what sequences can
  3443. * cause a new FR.
  3444. */
  3445. if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
  3446. tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
  3447. } else {
  3448. tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
  3449. }
  3450. /* restart the timer */
  3451. sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  3452. stcb, tp1->whoTo,
  3453. SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
  3454. sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
  3455. stcb, tp1->whoTo);
  3456. /* fix counts and things */
  3457. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
  3458. sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
  3459. tp1->whoTo->flight_size,
  3460. tp1->book_size,
  3461. (uint32_t)(uintptr_t)stcb,
  3462. tp1->rec.data.tsn);
  3463. }
  3464. if (tp1->sent < SCTP_DATAGRAM_RESEND) {
  3465. sctp_flight_size_decrease(tp1);
  3466. sctp_total_flight_decrease(stcb, tp1);
  3467. }
  3468. tp1->sent = SCTP_DATAGRAM_RESEND;
  3469. } {
  3470. /* audit code */
  3471. unsigned int audit;
  3472. audit = 0;
  3473. TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
  3474. if (tp1->sent == SCTP_DATAGRAM_RESEND)
  3475. audit++;
  3476. }
  3477. TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
  3478. sctp_next) {
  3479. if (tp1->sent == SCTP_DATAGRAM_RESEND)
  3480. audit++;
  3481. }
  3482. if (audit != stcb->asoc.sent_queue_retran_cnt) {
  3483. SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
  3484. audit, stcb->asoc.sent_queue_retran_cnt);
  3485. #ifndef SCTP_AUDITING_ENABLED
  3486. stcb->asoc.sent_queue_retran_cnt = audit;
  3487. #endif
  3488. }
  3489. }
  3490. }
  3491. break;
  3492. case SCTP_ASCONF:
  3493. {
  3494. struct sctp_tmit_chunk *asconf;
  3495. TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
  3496. sctp_next) {
  3497. if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
  3498. break;
  3499. }
  3500. }
  3501. if (asconf) {
  3502. if (asconf->sent != SCTP_DATAGRAM_RESEND)
  3503. sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
  3504. asconf->sent = SCTP_DATAGRAM_RESEND;
  3505. asconf->snd_count--;
  3506. }
  3507. }
  3508. break;
  3509. case SCTP_INITIATION:
  3510. /* resend the INIT */
  3511. stcb->asoc.dropped_special_cnt++;
  3512. if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
  3513. /*
3514. * If we can get it in within a few attempts we do
3515. * this, otherwise we let the timer fire.
  3516. */
  3517. sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
  3518. stcb, net,
  3519. SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
  3520. sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
  3521. }
  3522. break;
  3523. case SCTP_SELECTIVE_ACK:
  3524. case SCTP_NR_SELECTIVE_ACK:
  3525. /* resend the sack */
  3526. sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
  3527. break;
  3528. case SCTP_HEARTBEAT_REQUEST:
  3529. /* resend a demand HB */
  3530. if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
3531. /* Only retransmit if we KNOW we won't destroy the tcb */
  3532. sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
  3533. }
  3534. break;
  3535. case SCTP_SHUTDOWN:
  3536. sctp_send_shutdown(stcb, net);
  3537. break;
  3538. case SCTP_SHUTDOWN_ACK:
  3539. sctp_send_shutdown_ack(stcb, net);
  3540. break;
  3541. case SCTP_COOKIE_ECHO:
  3542. {
  3543. struct sctp_tmit_chunk *cookie;
  3544. cookie = NULL;
  3545. TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
  3546. sctp_next) {
  3547. if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
  3548. break;
  3549. }
  3550. }
  3551. if (cookie) {
  3552. if (cookie->sent != SCTP_DATAGRAM_RESEND)
  3553. sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
  3554. cookie->sent = SCTP_DATAGRAM_RESEND;
  3555. sctp_stop_all_cookie_timers(stcb);
  3556. }
  3557. }
  3558. break;
  3559. case SCTP_COOKIE_ACK:
  3560. sctp_send_cookie_ack(stcb);
  3561. break;
  3562. case SCTP_ASCONF_ACK:
  3563. /* resend last asconf ack */
  3564. sctp_send_asconf_ack(stcb);
  3565. break;
  3566. case SCTP_IFORWARD_CUM_TSN:
  3567. case SCTP_FORWARD_CUM_TSN:
  3568. send_forward_tsn(stcb, &stcb->asoc);
  3569. break;
  3570. /* can't do anything with these */
  3571. case SCTP_PACKET_DROPPED:
  3572. case SCTP_INITIATION_ACK: /* this should not happen */
  3573. case SCTP_HEARTBEAT_ACK:
  3574. case SCTP_ABORT_ASSOCIATION:
  3575. case SCTP_OPERATION_ERROR:
  3576. case SCTP_SHUTDOWN_COMPLETE:
  3577. case SCTP_ECN_ECHO:
  3578. case SCTP_ECN_CWR:
  3579. default:
  3580. break;
  3581. }
  3582. return (0);
  3583. }
  3584. void
  3585. sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
  3586. {
  3587. uint32_t i;
  3588. uint16_t temp;
  3589. /*
3590. * We set things to 0xffffffff since this is the last delivered sequence
3591. * and the next message delivered after the reset will have ID 0.
  3592. */
  3593. if (number_entries) {
  3594. for (i = 0; i < number_entries; i++) {
  3595. temp = ntohs(list[i]);
  3596. if (temp >= stcb->asoc.streamincnt) {
  3597. continue;
  3598. }
  3599. stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
  3600. }
  3601. } else {
  3602. list = NULL;
  3603. for (i = 0; i < stcb->asoc.streamincnt; i++) {
  3604. stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
  3605. }
  3606. }
  3607. sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
  3608. }
  3609. static void
  3610. sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
  3611. {
  3612. uint32_t i;
  3613. uint16_t temp;
  3614. if (number_entries > 0) {
  3615. for (i = 0; i < number_entries; i++) {
  3616. temp = ntohs(list[i]);
  3617. if (temp >= stcb->asoc.streamoutcnt) {
  3618. /* no such stream */
  3619. continue;
  3620. }
  3621. stcb->asoc.strmout[temp].next_mid_ordered = 0;
  3622. stcb->asoc.strmout[temp].next_mid_unordered = 0;
  3623. }
  3624. } else {
  3625. for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
  3626. stcb->asoc.strmout[i].next_mid_ordered = 0;
  3627. stcb->asoc.strmout[i].next_mid_unordered = 0;
  3628. }
  3629. }
  3630. sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
  3631. }
  3632. static void
  3633. sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
  3634. {
  3635. uint32_t i;
  3636. uint16_t temp;
  3637. if (number_entries > 0) {
  3638. for (i = 0; i < number_entries; i++) {
  3639. temp = ntohs(list[i]);
  3640. if (temp >= stcb->asoc.streamoutcnt) {
  3641. /* no such stream */
  3642. continue;
  3643. }
  3644. stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
  3645. }
  3646. } else {
  3647. for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
  3648. stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
  3649. }
  3650. }
  3651. }
  3652. struct sctp_stream_reset_request *
  3653. sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
  3654. {
  3655. struct sctp_association *asoc;
  3656. struct sctp_chunkhdr *ch;
  3657. struct sctp_stream_reset_request *r;
  3658. struct sctp_tmit_chunk *chk;
  3659. int len, clen;
  3660. asoc = &stcb->asoc;
  3661. chk = asoc->str_reset;
  3662. if (TAILQ_EMPTY(&asoc->control_send_queue) ||
  3663. (chk == NULL)) {
  3664. asoc->stream_reset_outstanding = 0;
  3665. return (NULL);
  3666. }
  3667. if (chk->data == NULL) {
  3668. return (NULL);
  3669. }
  3670. if (bchk != NULL) {
  3671. /* he wants a copy of the chk pointer */
  3672. *bchk = chk;
  3673. }
  3674. clen = chk->send_size;
  3675. ch = mtod(chk->data, struct sctp_chunkhdr *);
  3676. r = (struct sctp_stream_reset_request *)(ch + 1);
  3677. if (ntohl(r->request_seq) == seq) {
  3678. /* found it */
  3679. return (r);
  3680. }
  3681. len = SCTP_SIZE32(ntohs(r->ph.param_length));
  3682. if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
  3683. /* move to the next one, there can only be a max of two */
  3684. r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
  3685. if (ntohl(r->request_seq) == seq) {
  3686. return (r);
  3687. }
  3688. }
  3689. /* that seq is not here */
  3690. return (NULL);
  3691. }
  3692. static void
  3693. sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
  3694. {
  3695. struct sctp_association *asoc;
  3696. struct sctp_tmit_chunk *chk;
  3697. asoc = &stcb->asoc;
  3698. chk = asoc->str_reset;
  3699. if (chk == NULL) {
  3700. return;
  3701. }
  3702. asoc->str_reset = NULL;
  3703. sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
  3704. NULL, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
  3705. TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
  3706. asoc->ctrl_queue_cnt--;
  3707. if (chk->data) {
  3708. sctp_m_freem(chk->data);
  3709. chk->data = NULL;
  3710. }
  3711. sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  3712. }
  3713. static int
  3714. sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
  3715. uint32_t seq, uint32_t action,
  3716. struct sctp_stream_reset_response *respin)
  3717. {
  3718. uint16_t type;
  3719. int lparam_len;
  3720. struct sctp_association *asoc = &stcb->asoc;
  3721. struct sctp_tmit_chunk *chk;
  3722. struct sctp_stream_reset_request *req_param;
  3723. struct sctp_stream_reset_out_request *req_out_param;
  3724. struct sctp_stream_reset_in_request *req_in_param;
  3725. uint32_t number_entries;
  3726. if (asoc->stream_reset_outstanding == 0) {
  3727. /* duplicate */
  3728. return (0);
  3729. }
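/*
 * Responses are matched against str_reset_seq_out, the sequence number of
 * our oldest outstanding request.  On a definitive answer the sequence is
 * advanced; if the peer answers "in progress" the sequence is moved back
 * again further below so the same request will be retransmitted and
 * answered later.
 */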
  3730. if (seq == stcb->asoc.str_reset_seq_out) {
  3731. req_param = sctp_find_stream_reset(stcb, seq, &chk);
  3732. if (req_param != NULL) {
  3733. stcb->asoc.str_reset_seq_out++;
  3734. type = ntohs(req_param->ph.param_type);
  3735. lparam_len = ntohs(req_param->ph.param_length);
  3736. if (type == SCTP_STR_RESET_OUT_REQUEST) {
  3737. int no_clear = 0;
  3738. req_out_param = (struct sctp_stream_reset_out_request *)req_param;
  3739. number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
  3740. asoc->stream_reset_out_is_outstanding = 0;
  3741. if (asoc->stream_reset_outstanding)
  3742. asoc->stream_reset_outstanding--;
  3743. if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
  3744. /* do it */
  3745. sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
  3746. } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
  3747. sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
  3748. } else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
  3749. /* Set it up so we don't stop retransmitting */
  3750. asoc->stream_reset_outstanding++;
  3751. stcb->asoc.str_reset_seq_out--;
  3752. asoc->stream_reset_out_is_outstanding = 1;
  3753. no_clear = 1;
  3754. } else {
  3755. sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
  3756. }
  3757. if (no_clear == 0) {
  3758. sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
  3759. }
  3760. } else if (type == SCTP_STR_RESET_IN_REQUEST) {
  3761. req_in_param = (struct sctp_stream_reset_in_request *)req_param;
  3762. number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
  3763. if (asoc->stream_reset_outstanding)
  3764. asoc->stream_reset_outstanding--;
  3765. if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
  3766. sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
  3767. number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
  3768. } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
  3769. sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
  3770. number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
  3771. }
  3772. } else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
3773. /* OK, we may now have more streams */
  3774. int num_stream;
  3775. num_stream = stcb->asoc.strm_pending_add_size;
  3776. if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
  3777. /* TSNH */
  3778. num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
  3779. }
  3780. stcb->asoc.strm_pending_add_size = 0;
  3781. if (asoc->stream_reset_outstanding)
  3782. asoc->stream_reset_outstanding--;
  3783. if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
  3784. /* Put the new streams into effect */
  3785. int i;
  3786. for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) {
  3787. asoc->strmout[i].state = SCTP_STREAM_OPEN;
  3788. }
  3789. asoc->streamoutcnt += num_stream;
  3790. sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
  3791. } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
  3792. sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
  3793. SCTP_STREAM_CHANGE_DENIED);
  3794. } else {
  3795. sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
  3796. SCTP_STREAM_CHANGE_FAILED);
  3797. }
  3798. } else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
  3799. if (asoc->stream_reset_outstanding)
  3800. asoc->stream_reset_outstanding--;
  3801. if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
  3802. sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
  3803. SCTP_STREAM_CHANGE_DENIED);
  3804. } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
  3805. sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
  3806. SCTP_STREAM_CHANGE_FAILED);
  3807. }
  3808. } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
  3809. /**
  3810. * a) Adopt the new in tsn.
  3811. * b) reset the map
  3812. * c) Adopt the new out-tsn
  3813. */
  3814. struct sctp_stream_reset_response_tsn *resp;
  3815. struct sctp_forward_tsn_chunk fwdtsn;
  3816. int abort_flag = 0;
  3817. if (respin == NULL) {
  3818. /* huh ? */
  3819. return (0);
  3820. }
  3821. if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
  3822. return (0);
  3823. }
  3824. if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
  3825. resp = (struct sctp_stream_reset_response_tsn *)respin;
  3826. asoc->stream_reset_outstanding--;
  3827. fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
  3828. fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
  3829. fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
  3830. sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
  3831. if (abort_flag) {
  3832. return (1);
  3833. }
  3834. stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
  3835. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
  3836. sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
  3837. }
  3838. stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
  3839. stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
  3840. memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
  3841. stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
  3842. memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
  3843. stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
  3844. stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
  3845. sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
  3846. sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
  3847. sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
  3848. } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
  3849. sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
  3850. SCTP_ASSOC_RESET_DENIED);
  3851. } else {
  3852. sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
  3853. SCTP_ASSOC_RESET_FAILED);
  3854. }
  3855. }
  3856. /* get rid of the request and get the request flags */
  3857. if (asoc->stream_reset_outstanding == 0) {
  3858. sctp_clean_up_stream_reset(stcb);
  3859. }
  3860. }
  3861. }
  3862. if (asoc->stream_reset_outstanding == 0) {
  3863. sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
  3864. }
  3865. return (0);
  3866. }
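/*
 * Peer sent an Incoming SSN Reset Request: it wants us to reset (some of)
 * our outgoing streams. If the request sequence number is current and we
 * support and can honor it, mark the listed (or all) open outgoing streams
 * as SCTP_STREAM_RESET_PENDING and record the result; otherwise echo the
 * previously recorded result or report a bad sequence number.
 */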
  3867. static void
  3868. sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
  3869. struct sctp_tmit_chunk *chk,
  3870. struct sctp_stream_reset_in_request *req, int trunc)
  3871. {
  3872. uint32_t seq;
  3873. int len, i;
  3874. int number_entries;
  3875. uint16_t temp;
  3876. /*
  3877. * peer wants me to send a str-reset to him for my outgoing seq's if
  3878. * seq_in is right.
  3879. */
  3880. struct sctp_association *asoc = &stcb->asoc;
  3881. seq = ntohl(req->request_seq);
  3882. if (asoc->str_reset_seq_in == seq) {
  3883. asoc->last_reset_action[1] = asoc->last_reset_action[0];
  3884. if ((asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ) == 0) {
  3885. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  3886. } else if (trunc) {
  3887. /* Can't do it, since they exceeded our buffer size */
  3888. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  3889. } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
  3890. len = ntohs(req->ph.param_length);
  3891. number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
  3892. if (number_entries) {
  3893. for (i = 0; i < number_entries; i++) {
  3894. temp = ntohs(req->list_of_streams[i]);
  3895. if (temp >= stcb->asoc.streamoutcnt) {
  3896. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  3897. goto bad_boy;
  3898. }
  3899. req->list_of_streams[i] = temp;
  3900. }
  3901. for (i = 0; i < number_entries; i++) {
  3902. if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
  3903. stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
  3904. }
  3905. }
  3906. } else {
3907. /* It's all streams */
  3908. for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
  3909. if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
  3910. stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
  3911. }
  3912. }
  3913. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
  3914. } else {
  3915. /* Can't do it, since we have sent one out */
  3916. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
  3917. }
  3918. bad_boy:
  3919. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  3920. asoc->str_reset_seq_in++;
  3921. } else if (asoc->str_reset_seq_in - 1 == seq) {
  3922. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  3923. } else if (asoc->str_reset_seq_in - 2 == seq) {
  3924. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
  3925. } else {
  3926. sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
  3927. }
  3928. sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
  3929. }
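/*
 * Peer sent an SSN/TSN (association) reset request. If allowed, fake a
 * forward-TSN to flush reassembly state, advance the local TSN space by
 * SCTP_STREAM_RESET_TSN_DELTA, clear the mapping arrays, reset all in and
 * out stream sequence numbers and reply with our next sending TSN and new
 * base TSN. Returns 1 if handling the forward-TSN aborted the association.
 */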
  3930. static int
  3931. sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
  3932. struct sctp_tmit_chunk *chk,
  3933. struct sctp_stream_reset_tsn_request *req)
  3934. {
  3935. /* reset all in and out and update the tsn */
  3936. /*
  3937. * A) reset my str-seq's on in and out. B) Select a receive next,
  3938. * and set cum-ack to it. Also process this selected number as a
  3939. * fwd-tsn as well. C) set in the response my next sending seq.
  3940. */
  3941. struct sctp_forward_tsn_chunk fwdtsn;
  3942. struct sctp_association *asoc = &stcb->asoc;
  3943. int abort_flag = 0;
  3944. uint32_t seq;
  3945. seq = ntohl(req->request_seq);
  3946. if (asoc->str_reset_seq_in == seq) {
  3947. asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
  3948. if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) {
  3949. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  3950. } else {
  3951. fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
  3952. fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
  3953. fwdtsn.ch.chunk_flags = 0;
  3954. fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
  3955. sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
  3956. if (abort_flag) {
  3957. return (1);
  3958. }
  3959. asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
  3960. if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
  3961. sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
  3962. }
  3963. asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
  3964. asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
  3965. memset(asoc->mapping_array, 0, asoc->mapping_array_size);
  3966. asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
  3967. memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
  3968. atomic_add_int(&asoc->sending_seq, 1);
  3969. /* save off historical data for retrans */
  3970. asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
  3971. asoc->last_sending_seq[0] = asoc->sending_seq;
  3972. asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
  3973. asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
  3974. sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
  3975. sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
  3976. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
  3977. sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
  3978. }
  3979. sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
  3980. asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
  3981. asoc->str_reset_seq_in++;
  3982. } else if (asoc->str_reset_seq_in - 1 == seq) {
  3983. sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
  3984. asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
  3985. } else if (asoc->str_reset_seq_in - 2 == seq) {
  3986. sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
  3987. asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
  3988. } else {
  3989. sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
  3990. }
  3991. return (0);
  3992. }
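/*
 * Peer sent an Outgoing SSN Reset Request: it is resetting (some of) its
 * outgoing streams, i.e. our incoming ones. If our cumulative TSN has
 * already reached the peer's send_reset_at_tsn we reset the incoming
 * streams right away; otherwise the request is queued on resetHead until
 * the missing TSNs arrive and "in progress" is reported back.
 */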
  3993. static void
  3994. sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
  3995. struct sctp_tmit_chunk *chk,
  3996. struct sctp_stream_reset_out_request *req, int trunc)
  3997. {
  3998. uint32_t seq, tsn;
  3999. int number_entries, len;
  4000. struct sctp_association *asoc = &stcb->asoc;
  4001. seq = ntohl(req->request_seq);
4002. /* now if it's not a duplicate we process it */
  4003. if (asoc->str_reset_seq_in == seq) {
  4004. len = ntohs(req->ph.param_length);
  4005. number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
4006. /*
4007. * The sender is resetting; handle the list issue. We must
4008. * a) verify whether we can do the reset now; if so, no problem;
4009. * b) if we can't do the reset yet, we must copy the request;
4010. * c) queue it, and set up the data-in processor to trigger it
4011. * when needed and dequeue all the queued data.
4012. */
  4013. tsn = ntohl(req->send_reset_at_tsn);
  4014. /* move the reset action back one */
  4015. asoc->last_reset_action[1] = asoc->last_reset_action[0];
  4016. if ((asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ) == 0) {
  4017. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4018. } else if (trunc) {
  4019. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4020. } else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
  4021. /* we can do it now */
  4022. sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
  4023. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
  4024. } else {
  4025. /*
  4026. * we must queue it up and thus wait for the TSN's
  4027. * to arrive that are at or before tsn
  4028. */
  4029. struct sctp_stream_reset_list *liste;
  4030. int siz;
  4031. siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
  4032. SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
  4033. siz, SCTP_M_STRESET);
  4034. if (liste == NULL) {
  4035. /* gak out of memory */
  4036. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4037. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  4038. return;
  4039. }
  4040. liste->seq = seq;
  4041. liste->tsn = tsn;
  4042. liste->number_entries = number_entries;
  4043. memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
  4044. TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
  4045. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
  4046. }
  4047. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  4048. asoc->str_reset_seq_in++;
  4049. } else if ((asoc->str_reset_seq_in - 1) == seq) {
  4050. /*
  4051. * one seq back, just echo back last action since my
  4052. * response was lost.
  4053. */
  4054. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  4055. } else if ((asoc->str_reset_seq_in - 2) == seq) {
  4056. /*
  4057. * two seq back, just echo back last action since my
  4058. * response was lost.
  4059. */
  4060. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
  4061. } else {
  4062. sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
  4063. }
  4064. }
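/*
 * Peer asks us to add incoming streams. If the new total stays within our
 * advertised maximum, allocate a larger strmin array, migrate the queues
 * and per-stream state of the existing streams, initialize the new ones
 * and report PERFORMED; otherwise the request is denied.
 */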
  4065. static void
  4066. sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
  4067. struct sctp_stream_reset_add_strm *str_add)
  4068. {
  4069. /*
  4070. * Peer is requesting to add more streams.
4071. * If it's within our max-streams we will
  4072. * allow it.
  4073. */
  4074. uint32_t num_stream, i;
  4075. uint32_t seq;
  4076. struct sctp_association *asoc = &stcb->asoc;
  4077. struct sctp_queued_to_read *ctl, *nctl;
  4078. /* Get the number. */
  4079. seq = ntohl(str_add->request_seq);
  4080. num_stream = ntohs(str_add->number_of_streams);
  4081. /* Now what would be the new total? */
  4082. if (asoc->str_reset_seq_in == seq) {
  4083. num_stream += stcb->asoc.streamincnt;
  4084. stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
  4085. if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) {
  4086. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4087. } else if ((num_stream > stcb->asoc.max_inbound_streams) ||
  4088. (num_stream > 0xffff)) {
4089. /* We must reject it, they ask for too many */
  4090. denied:
  4091. stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4092. } else {
  4093. /* Ok, we can do that :-) */
  4094. struct sctp_stream_in *oldstrm;
  4095. /* save off the old */
  4096. oldstrm = stcb->asoc.strmin;
  4097. SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
  4098. (num_stream * sizeof(struct sctp_stream_in)),
  4099. SCTP_M_STRMI);
  4100. if (stcb->asoc.strmin == NULL) {
  4101. stcb->asoc.strmin = oldstrm;
  4102. goto denied;
  4103. }
  4104. /* copy off the old data */
  4105. for (i = 0; i < stcb->asoc.streamincnt; i++) {
  4106. TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
  4107. TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
  4108. stcb->asoc.strmin[i].sid = i;
  4109. stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
  4110. stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
  4111. stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
  4112. /* now anything on those queues? */
  4113. TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
  4114. TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
  4115. TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
  4116. }
  4117. TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
  4118. TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
  4119. TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
  4120. }
  4121. }
  4122. /* Init the new streams */
  4123. for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
  4124. TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
  4125. TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
  4126. stcb->asoc.strmin[i].sid = i;
  4127. stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
  4128. stcb->asoc.strmin[i].pd_api_started = 0;
  4129. stcb->asoc.strmin[i].delivery_started = 0;
  4130. }
  4131. SCTP_FREE(oldstrm, SCTP_M_STRMI);
  4132. /* update the size */
  4133. stcb->asoc.streamincnt = num_stream;
  4134. stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
  4135. sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
  4136. }
  4137. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  4138. asoc->str_reset_seq_in++;
  4139. } else if ((asoc->str_reset_seq_in - 1) == seq) {
  4140. /*
  4141. * one seq back, just echo back last action since my
  4142. * response was lost.
  4143. */
  4144. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  4145. } else if ((asoc->str_reset_seq_in - 2) == seq) {
  4146. /*
  4147. * two seq back, just echo back last action since my
  4148. * response was lost.
  4149. */
  4150. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
  4151. } else {
  4152. sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
  4153. }
  4154. }
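/*
 * Peer asks us to add outgoing streams. If nothing else is outstanding and
 * the new total stays below 64K streams, queue our own stream-add request
 * via sctp_send_str_reset_req() and report PERFORMED; otherwise deny the
 * request or report "in progress".
 */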
  4155. static void
  4156. sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
  4157. struct sctp_stream_reset_add_strm *str_add)
  4158. {
  4159. /*
  4160. * Peer is requesting to add more streams.
4161. * If it's within our max-streams we will
  4162. * allow it.
  4163. */
  4164. uint16_t num_stream;
  4165. uint32_t seq;
  4166. struct sctp_association *asoc = &stcb->asoc;
  4167. /* Get the number. */
  4168. seq = ntohl(str_add->request_seq);
  4169. num_stream = ntohs(str_add->number_of_streams);
  4170. /* Now what would be the new total? */
  4171. if (asoc->str_reset_seq_in == seq) {
  4172. stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
  4173. if ((asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ) == 0) {
  4174. asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4175. } else if (stcb->asoc.stream_reset_outstanding) {
4176. /* We must reject it, we have something pending */
  4177. stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
  4178. } else {
  4179. /* Ok, we can do that :-) */
  4180. int mychk;
  4181. mychk = stcb->asoc.streamoutcnt;
  4182. mychk += num_stream;
  4183. if (mychk < 0x10000) {
  4184. stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
  4185. if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
  4186. stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4187. }
  4188. } else {
  4189. stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
  4190. }
  4191. }
  4192. sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
  4193. asoc->str_reset_seq_in++;
  4194. } else if ((asoc->str_reset_seq_in - 1) == seq) {
  4195. /*
  4196. * one seq back, just echo back last action since my
  4197. * response was lost.
  4198. */
  4199. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
  4200. } else if ((asoc->str_reset_seq_in - 2) == seq) {
  4201. /*
  4202. * two seq back, just echo back last action since my
  4203. * response was lost.
  4204. */
  4205. sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
  4206. } else {
  4207. sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
  4208. }
  4209. }
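/*
 * Process a received STREAM RESET chunk: walk its parameters, dispatch each
 * request type to the handlers above, and collect the results in a response
 * chunk that is queued on the control send queue. Returns non-zero only when
 * processing caused the association to be aborted.
 */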
  4210. #ifdef __GNUC__
  4211. __attribute__ ((noinline))
  4212. #endif
  4213. static int
  4214. sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
  4215. struct sctp_chunkhdr *ch_req)
  4216. {
  4217. uint16_t remaining_length, param_len, ptype;
  4218. struct sctp_paramhdr pstore;
  4219. uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
  4220. uint32_t seq = 0;
  4221. int num_req = 0;
  4222. int trunc = 0;
  4223. struct sctp_tmit_chunk *chk;
  4224. struct sctp_chunkhdr *ch;
  4225. struct sctp_paramhdr *ph;
  4226. int ret_code = 0;
  4227. int num_param = 0;
  4228. /* now it may be a reset or a reset-response */
  4229. remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);
  4230. /* setup for adding the response */
  4231. sctp_alloc_a_chunk(stcb, chk);
  4232. if (chk == NULL) {
  4233. return (ret_code);
  4234. }
  4235. chk->copy_by_ref = 0;
  4236. chk->rec.chunk_id.id = SCTP_STREAM_RESET;
  4237. chk->rec.chunk_id.can_take_data = 0;
  4238. chk->flags = 0;
  4239. chk->asoc = &stcb->asoc;
  4240. chk->no_fr_allowed = 0;
  4241. chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
  4242. chk->book_size_scale = 0;
  4243. chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
  4244. if (chk->data == NULL) {
  4245. strres_nochunk:
  4246. if (chk->data) {
  4247. sctp_m_freem(chk->data);
  4248. chk->data = NULL;
  4249. }
  4250. sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
  4251. return (ret_code);
  4252. }
  4253. SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
  4254. /* setup chunk parameters */
  4255. chk->sent = SCTP_DATAGRAM_UNSENT;
  4256. chk->snd_count = 0;
  4257. chk->whoTo = NULL;
  4258. ch = mtod(chk->data, struct sctp_chunkhdr *);
  4259. ch->chunk_type = SCTP_STREAM_RESET;
  4260. ch->chunk_flags = 0;
  4261. ch->chunk_length = htons(chk->send_size);
  4262. SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
  4263. offset += sizeof(struct sctp_chunkhdr);
  4264. while (remaining_length >= sizeof(struct sctp_paramhdr)) {
  4265. ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
  4266. if (ph == NULL) {
  4267. /* TSNH */
  4268. break;
  4269. }
  4270. param_len = ntohs(ph->param_length);
  4271. if ((param_len > remaining_length) ||
  4272. (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
  4273. /* bad parameter length */
  4274. break;
  4275. }
  4276. ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
  4277. (uint8_t *)&cstore);
  4278. if (ph == NULL) {
  4279. /* TSNH */
  4280. break;
  4281. }
  4282. ptype = ntohs(ph->param_type);
  4283. num_param++;
  4284. if (param_len > sizeof(cstore)) {
  4285. trunc = 1;
  4286. } else {
  4287. trunc = 0;
  4288. }
  4289. if (num_param > SCTP_MAX_RESET_PARAMS) {
4290. /* hit the maximum number of parameters already, sorry... */
  4291. break;
  4292. }
  4293. if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
  4294. struct sctp_stream_reset_out_request *req_out;
  4295. if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
  4296. break;
  4297. }
  4298. req_out = (struct sctp_stream_reset_out_request *)ph;
  4299. num_req++;
  4300. if (stcb->asoc.stream_reset_outstanding) {
  4301. seq = ntohl(req_out->response_seq);
  4302. if (seq == stcb->asoc.str_reset_seq_out) {
  4303. /* implicit ack */
  4304. (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
  4305. }
  4306. }
  4307. sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
  4308. } else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
  4309. struct sctp_stream_reset_add_strm *str_add;
  4310. if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
  4311. break;
  4312. }
  4313. str_add = (struct sctp_stream_reset_add_strm *)ph;
  4314. num_req++;
  4315. sctp_handle_str_reset_add_strm(stcb, chk, str_add);
  4316. } else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
  4317. struct sctp_stream_reset_add_strm *str_add;
  4318. if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
  4319. break;
  4320. }
  4321. str_add = (struct sctp_stream_reset_add_strm *)ph;
  4322. num_req++;
  4323. sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
  4324. } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
  4325. struct sctp_stream_reset_in_request *req_in;
  4326. num_req++;
  4327. req_in = (struct sctp_stream_reset_in_request *)ph;
  4328. sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
  4329. } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
  4330. struct sctp_stream_reset_tsn_request *req_tsn;
  4331. num_req++;
  4332. req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
  4333. if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
  4334. ret_code = 1;
  4335. goto strres_nochunk;
  4336. }
  4337. /* no more */
  4338. break;
  4339. } else if (ptype == SCTP_STR_RESET_RESPONSE) {
  4340. struct sctp_stream_reset_response *resp;
  4341. uint32_t result;
  4342. if (param_len < sizeof(struct sctp_stream_reset_response)) {
  4343. break;
  4344. }
  4345. resp = (struct sctp_stream_reset_response *)ph;
  4346. seq = ntohl(resp->response_seq);
  4347. result = ntohl(resp->result);
  4348. if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
  4349. ret_code = 1;
  4350. goto strres_nochunk;
  4351. }
  4352. } else {
  4353. break;
  4354. }
  4355. offset += SCTP_SIZE32(param_len);
  4356. if (remaining_length >= SCTP_SIZE32(param_len)) {
  4357. remaining_length -= SCTP_SIZE32(param_len);
  4358. } else {
  4359. remaining_length = 0;
  4360. }
  4361. }
  4362. if (num_req == 0) {
4363. /* we have no response, free the stuff */
  4364. goto strres_nochunk;
  4365. }
  4366. /* ok we have a chunk to link in */
  4367. TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
  4368. chk,
  4369. sctp_next);
  4370. stcb->asoc.ctrl_queue_cnt++;
  4371. return (ret_code);
  4372. }
  4373. /*
4374. * Handle a router's or endpoint's report of a packet loss. There are two ways
4375. * to handle this: either we get the whole packet and must dissect it
4376. * ourselves (possibly with truncation and/or corruption), or it is a summary
4377. * from a middle box that did the dissecting for us.
  4378. */
  4379. static void
  4380. sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
  4381. struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
  4382. {
  4383. struct sctp_chunk_desc desc;
  4384. struct sctp_chunkhdr *chk_hdr;
  4385. struct sctp_data_chunk *data_chunk;
  4386. struct sctp_idata_chunk *idata_chunk;
  4387. uint32_t bottle_bw, on_queue;
  4388. uint32_t offset, chk_len;
  4389. uint16_t pktdrp_len;
  4390. uint8_t pktdrp_flags;
  4391. KASSERT(sizeof(struct sctp_pktdrop_chunk) <= limit,
  4392. ("PKTDROP chunk too small"));
  4393. pktdrp_flags = cp->ch.chunk_flags;
  4394. pktdrp_len = ntohs(cp->ch.chunk_length);
  4395. KASSERT(limit <= pktdrp_len, ("Inconsistent limit"));
  4396. if (pktdrp_flags & SCTP_PACKET_TRUNCATED) {
  4397. if (ntohs(cp->trunc_len) <= pktdrp_len - sizeof(struct sctp_pktdrop_chunk)) {
  4398. /* The peer plays games with us. */
  4399. return;
  4400. }
  4401. }
  4402. limit -= sizeof(struct sctp_pktdrop_chunk);
  4403. offset = 0;
  4404. if (offset == limit) {
  4405. if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
  4406. SCTP_STAT_INCR(sctps_pdrpbwrpt);
  4407. }
  4408. } else if (offset + sizeof(struct sctphdr) > limit) {
  4409. /* Only a partial SCTP common header. */
  4410. SCTP_STAT_INCR(sctps_pdrpcrupt);
  4411. offset = limit;
  4412. } else {
  4413. /* XXX: Check embedded SCTP common header. */
  4414. offset += sizeof(struct sctphdr);
  4415. }
  4416. /* Now parse through the chunks themselves. */
  4417. while (offset < limit) {
  4418. if (offset + sizeof(struct sctp_chunkhdr) > limit) {
  4419. SCTP_STAT_INCR(sctps_pdrpcrupt);
  4420. break;
  4421. }
  4422. chk_hdr = (struct sctp_chunkhdr *)(cp->data + offset);
  4423. desc.chunk_type = chk_hdr->chunk_type;
  4424. /* get amount we need to move */
  4425. chk_len = (uint32_t)ntohs(chk_hdr->chunk_length);
  4426. if (chk_len < sizeof(struct sctp_chunkhdr)) {
  4427. /* Someone is lying... */
  4428. break;
  4429. }
  4430. if (desc.chunk_type == SCTP_DATA) {
  4431. if (stcb->asoc.idata_supported) {
4432. /* Someone is playing games with us. */
  4433. break;
  4434. }
  4435. if (chk_len <= sizeof(struct sctp_data_chunk)) {
4436. /* Someone is playing games with us. */
  4437. break;
  4438. }
  4439. if (chk_len < sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY) {
  4440. /* Not enough data bytes available in the chunk. */
  4441. SCTP_STAT_INCR(sctps_pdrpnedat);
  4442. goto next_chunk;
  4443. }
  4444. if (offset + sizeof(struct sctp_data_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
  4445. /* Not enough data in buffer. */
  4446. break;
  4447. }
  4448. data_chunk = (struct sctp_data_chunk *)(cp->data + offset);
  4449. memcpy(desc.data_bytes, data_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
  4450. desc.tsn_ifany = data_chunk->dp.tsn;
  4451. if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
  4452. SCTP_STAT_INCR(sctps_pdrpmbda);
  4453. }
  4454. } else if (desc.chunk_type == SCTP_IDATA) {
  4455. if (!stcb->asoc.idata_supported) {
4456. /* Someone is playing games with us. */
  4457. break;
  4458. }
  4459. if (chk_len <= sizeof(struct sctp_idata_chunk)) {
4460. /* Someone is playing games with us. */
  4461. break;
  4462. }
  4463. if (chk_len < sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY) {
  4464. /* Not enough data bytes available in the chunk. */
  4465. SCTP_STAT_INCR(sctps_pdrpnedat);
  4466. goto next_chunk;
  4467. }
  4468. if (offset + sizeof(struct sctp_idata_chunk) + SCTP_NUM_DB_TO_VERIFY > limit) {
  4469. /* Not enough data in buffer. */
  4470. break;
  4471. }
  4472. idata_chunk = (struct sctp_idata_chunk *)(cp->data + offset);
  4473. memcpy(desc.data_bytes, idata_chunk + 1, SCTP_NUM_DB_TO_VERIFY);
  4474. desc.tsn_ifany = idata_chunk->dp.tsn;
  4475. if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
  4476. SCTP_STAT_INCR(sctps_pdrpmbda);
  4477. }
  4478. } else {
  4479. if (pktdrp_flags & SCTP_FROM_MIDDLE_BOX) {
  4480. SCTP_STAT_INCR(sctps_pdrpmbct);
  4481. }
  4482. }
  4483. if (process_chunk_drop(stcb, &desc, net, pktdrp_flags)) {
  4484. SCTP_STAT_INCR(sctps_pdrppdbrk);
  4485. break;
  4486. }
  4487. next_chunk:
  4488. offset += SCTP_SIZE32(chk_len);
  4489. }
  4490. /* Now update any rwnd --- possibly */
  4491. if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
  4492. /* From a peer, we get a rwnd report */
  4493. uint32_t a_rwnd;
  4494. SCTP_STAT_INCR(sctps_pdrpfehos);
  4495. bottle_bw = ntohl(cp->bottle_bw);
  4496. on_queue = ntohl(cp->current_onq);
  4497. if (bottle_bw && on_queue) {
  4498. /* a rwnd report is in here */
  4499. if (bottle_bw > on_queue)
  4500. a_rwnd = bottle_bw - on_queue;
  4501. else
  4502. a_rwnd = 0;
  4503. if (a_rwnd == 0)
  4504. stcb->asoc.peers_rwnd = 0;
  4505. else {
  4506. if (a_rwnd > stcb->asoc.total_flight) {
  4507. stcb->asoc.peers_rwnd =
  4508. a_rwnd - stcb->asoc.total_flight;
  4509. } else {
  4510. stcb->asoc.peers_rwnd = 0;
  4511. }
  4512. if (stcb->asoc.peers_rwnd <
  4513. stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
  4514. /* SWS sender side engages */
  4515. stcb->asoc.peers_rwnd = 0;
  4516. }
  4517. }
  4518. }
  4519. } else {
  4520. SCTP_STAT_INCR(sctps_pdrpfmbox);
  4521. }
  4522. /* now middle boxes in sat networks get a cwnd bump */
  4523. if ((pktdrp_flags & SCTP_FROM_MIDDLE_BOX) &&
  4524. (stcb->asoc.sat_t3_loss_recovery == 0) &&
  4525. (stcb->asoc.sat_network)) {
  4526. /*
4527. * This is debatable, but for sat networks it makes sense.
4528. * Note that if a T3 timer has gone off, we will prohibit any
  4529. * changes to cwnd until we exit the t3 loss recovery.
  4530. */
  4531. stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
  4532. net, cp, &bottle_bw, &on_queue);
  4533. }
  4534. }
  4535. /*
  4536. * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
  4537. * still contain IP/SCTP header - stcb: is the tcb found for this packet -
  4538. * offset: offset into the mbuf chain to first chunkhdr - length: is the
  4539. * length of the complete packet outputs: - length: modified to remaining
  4540. * length after control processing - netp: modified to new sctp_nets after
  4541. * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
  4542. * bad packet,...) otherwise return the tcb for this packet
  4543. */
  4544. #ifdef __GNUC__
  4545. __attribute__ ((noinline))
  4546. #endif
  4547. static struct sctp_tcb *
  4548. sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
  4549. struct sockaddr *src, struct sockaddr *dst,
  4550. struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
  4551. struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
  4552. #if defined(__FreeBSD__) && !defined(__Userspace__)
  4553. uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
  4554. #endif
  4555. uint32_t vrf_id, uint16_t port)
  4556. {
  4557. struct sctp_association *asoc;
  4558. struct mbuf *op_err;
  4559. char msg[SCTP_DIAG_INFO_LEN];
  4560. uint32_t vtag_in;
  4561. int num_chunks = 0; /* number of control chunks processed */
  4562. uint32_t chk_length, contiguous;
  4563. int ret;
  4564. int abort_no_unlock = 0;
  4565. int ecne_seen = 0;
  4566. int abort_flag;
  4567. /*
4568. * How big should this be, and should it be alloc'd? Let's try the
  4569. * d-mtu-ceiling for now (2k) and that should hopefully work ...
  4570. * until we get into jumbo grams and such..
  4571. */
  4572. uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
  4573. int got_auth = 0;
  4574. uint32_t auth_offset = 0, auth_len = 0;
  4575. int auth_skipped = 0;
  4576. int asconf_cnt = 0;
  4577. #if defined(__APPLE__) && !defined(__Userspace__)
  4578. struct socket *so;
  4579. #endif
  4580. SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
  4581. iphlen, *offset, length, (void *)stcb);
  4582. if (stcb) {
  4583. SCTP_TCB_LOCK_ASSERT(stcb);
  4584. }
  4585. /* validate chunk header length... */
  4586. if (ntohs(ch->chunk_length) < sizeof(*ch)) {
  4587. SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
  4588. ntohs(ch->chunk_length));
  4589. *offset = length;
  4590. return (stcb);
  4591. }
  4592. /*
  4593. * validate the verification tag
  4594. */
  4595. vtag_in = ntohl(sh->v_tag);
  4596. if (ch->chunk_type == SCTP_INITIATION) {
  4597. SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
  4598. ntohs(ch->chunk_length), vtag_in);
  4599. if (vtag_in != 0) {
4600. /* protocol error - silently discard... */
  4601. SCTP_STAT_INCR(sctps_badvtag);
  4602. if (stcb != NULL) {
  4603. SCTP_TCB_UNLOCK(stcb);
  4604. }
  4605. return (NULL);
  4606. }
  4607. } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
  4608. /*
  4609. * If there is no stcb, skip the AUTH chunk and process
  4610. * later after a stcb is found (to validate the lookup was
4611. * valid).
  4612. */
  4613. if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
  4614. (stcb == NULL) &&
  4615. (inp->auth_supported == 1)) {
  4616. /* save this chunk for later processing */
  4617. auth_skipped = 1;
  4618. auth_offset = *offset;
  4619. auth_len = ntohs(ch->chunk_length);
  4620. /* (temporarily) move past this chunk */
  4621. *offset += SCTP_SIZE32(auth_len);
  4622. if (*offset >= length) {
  4623. /* no more data left in the mbuf chain */
  4624. *offset = length;
  4625. return (NULL);
  4626. }
  4627. ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
  4628. sizeof(struct sctp_chunkhdr), chunk_buf);
  4629. }
  4630. if (ch == NULL) {
  4631. /* Help */
  4632. *offset = length;
  4633. return (stcb);
  4634. }
  4635. if (ch->chunk_type == SCTP_COOKIE_ECHO) {
  4636. goto process_control_chunks;
  4637. }
  4638. /*
4639. * first check if it's an ASCONF with an unknown src addr; we
  4640. * need to look inside to find the association
  4641. */
  4642. if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
  4643. struct sctp_chunkhdr *asconf_ch = ch;
  4644. uint32_t asconf_offset = 0, asconf_len = 0;
  4645. /* inp's refcount may be reduced */
  4646. SCTP_INP_INCR_REF(inp);
  4647. asconf_offset = *offset;
  4648. do {
  4649. asconf_len = ntohs(asconf_ch->chunk_length);
  4650. if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
  4651. break;
  4652. stcb = sctp_findassociation_ep_asconf(m,
  4653. *offset,
  4654. dst,
  4655. sh, &inp, netp, vrf_id);
  4656. if (stcb != NULL)
  4657. break;
  4658. asconf_offset += SCTP_SIZE32(asconf_len);
  4659. asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
  4660. sizeof(struct sctp_chunkhdr), chunk_buf);
  4661. } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
  4662. if (stcb == NULL) {
  4663. /*
  4664. * reduce inp's refcount if not reduced in
  4665. * sctp_findassociation_ep_asconf().
  4666. */
  4667. SCTP_INP_DECR_REF(inp);
  4668. }
  4669. /* now go back and verify any auth chunk to be sure */
  4670. if (auth_skipped && (stcb != NULL)) {
  4671. struct sctp_auth_chunk *auth;
  4672. if (auth_len <= SCTP_CHUNK_BUFFER_SIZE) {
  4673. auth = (struct sctp_auth_chunk *)sctp_m_getptr(m, auth_offset, auth_len, chunk_buf);
  4674. got_auth = 1;
  4675. auth_skipped = 0;
  4676. } else {
  4677. auth = NULL;
  4678. }
  4679. if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
  4680. auth_offset)) {
  4681. /* auth HMAC failed so dump it */
  4682. *offset = length;
  4683. return (stcb);
  4684. } else {
  4685. /* remaining chunks are HMAC checked */
  4686. stcb->asoc.authenticated = 1;
  4687. }
  4688. }
  4689. }
  4690. if (stcb == NULL) {
  4691. SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
  4692. op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
  4693. msg);
  4694. /* no association, so it's out of the blue... */
  4695. sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
  4696. #if defined(__FreeBSD__) && !defined(__Userspace__)
  4697. mflowtype, mflowid, inp->fibnum,
  4698. #endif
  4699. vrf_id, port);
  4700. *offset = length;
  4701. return (NULL);
  4702. }
  4703. asoc = &stcb->asoc;
  4704. /* ABORT and SHUTDOWN can use either v_tag... */
  4705. if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
  4706. (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
  4707. (ch->chunk_type == SCTP_PACKET_DROPPED)) {
  4708. /* Take the T-bit always into account. */
  4709. if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
  4710. (vtag_in == asoc->my_vtag)) ||
  4711. (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
  4712. (asoc->peer_vtag != htonl(0)) &&
  4713. (vtag_in == asoc->peer_vtag))) {
  4714. /* this is valid */
  4715. } else {
  4716. /* drop this packet... */
  4717. SCTP_STAT_INCR(sctps_badvtag);
  4718. if (stcb != NULL) {
  4719. SCTP_TCB_UNLOCK(stcb);
  4720. }
  4721. return (NULL);
  4722. }
  4723. } else {
  4724. /* for all other chunks, vtag must match */
  4725. if (vtag_in != asoc->my_vtag) {
  4726. /* invalid vtag... */
  4727. SCTPDBG(SCTP_DEBUG_INPUT3,
  4728. "invalid vtag: %xh, expect %xh\n",
  4729. vtag_in, asoc->my_vtag);
  4730. SCTP_STAT_INCR(sctps_badvtag);
  4731. if (stcb != NULL) {
  4732. SCTP_TCB_UNLOCK(stcb);
  4733. }
  4734. *offset = length;
  4735. return (NULL);
  4736. }
  4737. }
  4738. } /* end if !SCTP_COOKIE_ECHO */
  4739. /*
  4740. * process all control chunks...
  4741. */
  4742. if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
  4743. (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
  4744. (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
  4745. (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
  4746. /* implied cookie-ack.. we must have lost the ack */
  4747. sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
  4748. *netp);
  4749. }
  4750. process_control_chunks:
  4751. while (IS_SCTP_CONTROL(ch)) {
  4752. /* validate chunk length */
  4753. chk_length = ntohs(ch->chunk_length);
  4754. SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
  4755. ch->chunk_type, chk_length);
  4756. SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
  4757. if (chk_length < sizeof(*ch) ||
  4758. (*offset + (int)chk_length) > length) {
  4759. *offset = length;
  4760. return (stcb);
  4761. }
  4762. SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
  4763. /*
  4764. * INIT and INIT-ACK only gets the init ack "header" portion
  4765. * only because we don't have to process the peer's COOKIE.
  4766. * All others get a complete chunk.
  4767. */
  4768. switch (ch->chunk_type) {
  4769. case SCTP_INITIATION:
  4770. contiguous = sizeof(struct sctp_init_chunk);
  4771. break;
  4772. case SCTP_INITIATION_ACK:
  4773. contiguous = sizeof(struct sctp_init_ack_chunk);
  4774. break;
  4775. default:
  4776. contiguous = min(chk_length, sizeof(chunk_buf));
  4777. break;
  4778. }
  4779. ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
  4780. contiguous,
  4781. chunk_buf);
  4782. if (ch == NULL) {
  4783. *offset = length;
  4784. return (stcb);
  4785. }
  4786. num_chunks++;
  4787. /* Save off the last place we got a control from */
  4788. if (stcb != NULL) {
  4789. if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
  4790. /*
  4791. * allow last_control to be NULL if
  4792. * ASCONF... ASCONF processing will find the
  4793. * right net later
  4794. */
  4795. if ((netp != NULL) && (*netp != NULL))
  4796. stcb->asoc.last_control_chunk_from = *netp;
  4797. }
  4798. }
  4799. #ifdef SCTP_AUDITING_ENABLED
  4800. sctp_audit_log(0xB0, ch->chunk_type);
  4801. #endif
  4802. /* check to see if this chunk required auth, but isn't */
  4803. if ((stcb != NULL) &&
  4804. sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
  4805. !stcb->asoc.authenticated) {
  4806. /* "silently" ignore */
  4807. SCTP_STAT_INCR(sctps_recvauthmissing);
  4808. goto next_chunk;
  4809. }
  4810. switch (ch->chunk_type) {
  4811. case SCTP_INITIATION:
  4812. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
  4813. /* The INIT chunk must be the only chunk. */
  4814. if ((num_chunks > 1) ||
  4815. (length - *offset > (int)SCTP_SIZE32(chk_length))) {
  4816. /*
  4817. * RFC 4960bis requires stopping the
  4818. * processing of the packet.
  4819. */
  4820. *offset = length;
  4821. return (stcb);
  4822. }
  4823. /* Honor our resource limit. */
  4824. if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
  4825. op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
  4826. sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
  4827. #if defined(__FreeBSD__) && !defined(__Userspace__)
  4828. mflowtype, mflowid, inp->fibnum,
  4829. #endif
  4830. vrf_id, port);
  4831. *offset = length;
  4832. if (stcb != NULL) {
  4833. SCTP_TCB_UNLOCK(stcb);
  4834. }
  4835. return (NULL);
  4836. }
  4837. sctp_handle_init(m, iphlen, *offset, src, dst, sh,
  4838. (struct sctp_init_chunk *)ch, inp,
  4839. stcb, *netp,
  4840. #if defined(__FreeBSD__) && !defined(__Userspace__)
  4841. mflowtype, mflowid,
  4842. #endif
  4843. vrf_id, port);
  4844. *offset = length;
  4845. if (stcb != NULL) {
  4846. SCTP_TCB_UNLOCK(stcb);
  4847. }
  4848. return (NULL);
  4849. break;
  4850. case SCTP_PAD_CHUNK:
  4851. break;
  4852. case SCTP_INITIATION_ACK:
  4853. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT_ACK\n");
  4854. if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  4855. /* We are not interested anymore */
  4856. if ((stcb != NULL) && (stcb->asoc.total_output_queue_size)) {
  4857. ;
  4858. } else {
  4859. *offset = length;
  4860. if (stcb != NULL) {
  4861. #if defined(__APPLE__) && !defined(__Userspace__)
  4862. so = SCTP_INP_SO(inp);
  4863. atomic_add_int(&stcb->asoc.refcnt, 1);
  4864. SCTP_TCB_UNLOCK(stcb);
  4865. SCTP_SOCKET_LOCK(so, 1);
  4866. SCTP_TCB_LOCK(stcb);
  4867. atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4868. #endif
  4869. (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
  4870. SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
  4871. #if defined(__APPLE__) && !defined(__Userspace__)
  4872. SCTP_SOCKET_UNLOCK(so, 1);
  4873. #endif
  4874. }
  4875. return (NULL);
  4876. }
  4877. }
  4878. /* The INIT-ACK chunk must be the only chunk. */
  4879. if ((num_chunks > 1) ||
  4880. (length - *offset > (int)SCTP_SIZE32(chk_length))) {
  4881. *offset = length;
  4882. return (stcb);
  4883. }
  4884. if ((netp != NULL) && (*netp != NULL)) {
  4885. ret = sctp_handle_init_ack(m, iphlen, *offset,
  4886. src, dst, sh,
  4887. (struct sctp_init_ack_chunk *)ch,
  4888. stcb, *netp,
  4889. &abort_no_unlock,
  4890. #if defined(__FreeBSD__) && !defined(__Userspace__)
  4891. mflowtype, mflowid,
  4892. #endif
  4893. vrf_id);
  4894. } else {
  4895. ret = -1;
  4896. }
  4897. *offset = length;
  4898. if (abort_no_unlock) {
  4899. return (NULL);
  4900. }
  4901. /*
  4902. * Special case, I must call the output routine to
  4903. * get the cookie echoed
  4904. */
  4905. if ((stcb != NULL) && (ret == 0)) {
  4906. sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
  4907. }
  4908. return (stcb);
  4909. break;
  4910. case SCTP_SELECTIVE_ACK:
  4911. case SCTP_NR_SELECTIVE_ACK:
  4912. {
  4913. int abort_now = 0;
  4914. uint32_t a_rwnd, cum_ack;
  4915. uint16_t num_seg, num_nr_seg, num_dup;
  4916. uint8_t flags;
  4917. int offset_seg, offset_dup;
  4918. SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
  4919. ch->chunk_type == SCTP_SELECTIVE_ACK ? "SCTP_SACK" : "SCTP_NR_SACK");
  4920. SCTP_STAT_INCR(sctps_recvsacks);
  4921. if (stcb == NULL) {
  4922. SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing %s chunk\n",
  4923. (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK");
  4924. break;
  4925. }
  4926. if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
  4927. if (chk_length < sizeof(struct sctp_sack_chunk)) {
  4928. SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
  4929. break;
  4930. }
  4931. } else {
  4932. if (stcb->asoc.nrsack_supported == 0) {
  4933. goto unknown_chunk;
  4934. }
  4935. if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
  4936. SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR_SACK chunk, too small\n");
  4937. break;
  4938. }
  4939. }
  4940. if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
  4941. /*-
  4942. * If we have sent a shutdown-ack, we will pay no
  4943. * attention to a sack sent in to us since
  4944. * we don't care anymore.
  4945. */
  4946. break;
  4947. }
  4948. flags = ch->chunk_flags;
  4949. if (ch->chunk_type == SCTP_SELECTIVE_ACK) {
  4950. struct sctp_sack_chunk *sack;
  4951. sack = (struct sctp_sack_chunk *)ch;
  4952. cum_ack = ntohl(sack->sack.cum_tsn_ack);
  4953. num_seg = ntohs(sack->sack.num_gap_ack_blks);
  4954. num_nr_seg = 0;
  4955. num_dup = ntohs(sack->sack.num_dup_tsns);
  4956. a_rwnd = ntohl(sack->sack.a_rwnd);
  4957. if (sizeof(struct sctp_sack_chunk) +
  4958. num_seg * sizeof(struct sctp_gap_ack_block) +
  4959. num_dup * sizeof(uint32_t) != chk_length) {
  4960. SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
  4961. break;
  4962. }
  4963. offset_seg = *offset + sizeof(struct sctp_sack_chunk);
  4964. offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
  4965. } else {
  4966. struct sctp_nr_sack_chunk *nr_sack;
  4967. nr_sack = (struct sctp_nr_sack_chunk *)ch;
  4968. cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
  4969. num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
  4970. num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
  4971. num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
  4972. a_rwnd = ntohl(nr_sack->nr_sack.a_rwnd);
  4973. if (sizeof(struct sctp_nr_sack_chunk) +
  4974. (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
  4975. num_dup * sizeof(uint32_t) != chk_length) {
  4976. SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
  4977. break;
  4978. }
  4979. offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
  4980. offset_dup = offset_seg + (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block);
  4981. }
  4982. SCTPDBG(SCTP_DEBUG_INPUT3, "%s process cum_ack:%x num_seg:%d a_rwnd:%d\n",
  4983. (ch->chunk_type == SCTP_SELECTIVE_ACK) ? "SCTP_SACK" : "SCTP_NR_SACK",
  4984. cum_ack, num_seg, a_rwnd);
  4985. stcb->asoc.seen_a_sack_this_pkt = 1;
  4986. if ((stcb->asoc.pr_sctp_cnt == 0) &&
  4987. (num_seg == 0) && (num_nr_seg == 0) &&
  4988. SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
  4989. (stcb->asoc.saw_sack_with_frags == 0) &&
  4990. (stcb->asoc.saw_sack_with_nr_frags == 0) &&
  4991. (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
  4992. /*
  4993. * We have a SIMPLE sack having no
  4994. * prior segments and data on sent
  4995. * queue to be acked. Use the
  4996. * faster path sack processing. We
  4997. * also allow window update sacks
  4998. * with no missing segments to go
  4999. * this way too.
  5000. */
  5001. sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
  5002. &abort_now, ecne_seen);
  5003. } else {
  5004. if ((netp != NULL) && (*netp != NULL)) {
  5005. sctp_handle_sack(m, offset_seg, offset_dup, stcb,
  5006. num_seg, num_nr_seg, num_dup, &abort_now, flags,
  5007. cum_ack, a_rwnd, ecne_seen);
  5008. }
  5009. }
  5010. if (abort_now) {
  5011. /* ABORT signal from sack processing */
  5012. *offset = length;
  5013. return (NULL);
  5014. }
  5015. if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
  5016. TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
  5017. (stcb->asoc.stream_queue_cnt == 0)) {
  5018. sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
  5019. }
  5020. break;
  5021. }
  5022. case SCTP_HEARTBEAT_REQUEST:
  5023. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
  5024. if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
  5025. SCTP_STAT_INCR(sctps_recvheartbeat);
  5026. sctp_send_heartbeat_ack(stcb, m, *offset,
  5027. chk_length, *netp);
  5028. }
  5029. break;
  5030. case SCTP_HEARTBEAT_ACK:
  5031. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT_ACK\n");
  5032. if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
5033. /* It's not ours */
  5034. break;
  5035. }
  5036. SCTP_STAT_INCR(sctps_recvheartbeatack);
  5037. if ((netp != NULL) && (*netp != NULL)) {
  5038. sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
  5039. stcb, *netp);
  5040. }
  5041. break;
  5042. case SCTP_ABORT_ASSOCIATION:
  5043. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
  5044. (void *)stcb);
  5045. *offset = length;
  5046. if ((stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
  5047. if (sctp_handle_abort((struct sctp_abort_chunk *)ch, stcb, *netp)) {
  5048. return (NULL);
  5049. } else {
  5050. return (stcb);
  5051. }
  5052. } else {
  5053. return (NULL);
  5054. }
  5055. break;
  5056. case SCTP_SHUTDOWN:
  5057. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
  5058. (void *)stcb);
  5059. if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
  5060. break;
  5061. }
  5062. if ((netp != NULL) && (*netp != NULL)) {
  5063. abort_flag = 0;
  5064. sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
  5065. stcb, *netp, &abort_flag);
  5066. if (abort_flag) {
  5067. *offset = length;
  5068. return (NULL);
  5069. }
  5070. }
  5071. break;
  5072. case SCTP_SHUTDOWN_ACK:
  5073. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_ACK, stcb %p\n", (void *)stcb);
  5074. if ((chk_length == sizeof(struct sctp_shutdown_ack_chunk)) &&
  5075. (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
  5076. sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
  5077. *offset = length;
  5078. return (NULL);
  5079. }
  5080. break;
  5081. case SCTP_OPERATION_ERROR:
  5082. SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP_ERR\n");
  5083. if ((stcb != NULL) && (netp != NULL) && (*netp != NULL) &&
  5084. sctp_handle_error(ch, stcb, *netp, contiguous) < 0) {
  5085. *offset = length;
  5086. return (NULL);
  5087. }
  5088. break;
  5089. case SCTP_COOKIE_ECHO:
  5090. SCTPDBG(SCTP_DEBUG_INPUT3,
  5091. "SCTP_COOKIE_ECHO, stcb %p\n", (void *)stcb);
  5092. if ((stcb != NULL) && (stcb->asoc.total_output_queue_size > 0)) {
  5093. ;
  5094. } else {
  5095. if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  5096. /* We are not interested anymore */
  5097. abend:
  5098. if (stcb != NULL) {
  5099. SCTP_TCB_UNLOCK(stcb);
  5100. }
  5101. *offset = length;
  5102. return (NULL);
  5103. }
  5104. }
  5105. /*-
5106. * First, are we accepting? We do this again here
5107. * since it is possible that a previous endpoint WAS
5108. * listening, responded to an INIT-ACK and then
5109. * closed. We opened and bound... and are now no
  5110. * longer listening.
  5111. *
  5112. * XXXGL: notes on checking listen queue length.
  5113. * 1) SCTP_IS_LISTENING() doesn't necessarily mean
  5114. * SOLISTENING(), because a listening "UDP type"
  5115. * socket isn't listening in terms of the socket
  5116. * layer. It is a normal data flow socket, that
  5117. * can fork off new connections. Thus, we should
  5118. * look into sol_qlen only in case we are !UDP.
  5119. * 2) Checking sol_qlen in general requires locking
  5120. * the socket, and this code lacks that.
  5121. */
			if ((stcb == NULL) &&
			    (!SCTP_IS_LISTENING(inp) ||
			    (((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) &&
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) {
#else
			    inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit))) {
#endif
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
					sctp_abort_association(inp, stcb, m, iphlen,
					    src, dst, sh, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
					    mflowtype, mflowid,
#endif
					    vrf_id, port);
				}
				*offset = length;
				return (NULL);
			} else {
				struct mbuf *ret_buf;
				struct sctp_inpcb *linp;
				struct sctp_tmit_chunk *chk;

				if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
				    SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
					goto abend;
				}
				if (stcb) {
					linp = NULL;
				} else {
					linp = inp;
				}
				if (linp != NULL) {
					SCTP_ASOC_CREATE_LOCK(linp);
				}
				if (netp != NULL) {
					struct sctp_tcb *locked_stcb;

					locked_stcb = stcb;
					ret_buf =
					    sctp_handle_cookie_echo(m, iphlen,
					    *offset,
					    src, dst,
					    sh,
					    (struct sctp_cookie_echo_chunk *)ch,
					    &inp, &stcb, netp,
					    auth_skipped,
					    auth_offset,
					    auth_len,
					    &locked_stcb,
#if defined(__FreeBSD__) && !defined(__Userspace__)
					    mflowtype,
					    mflowid,
#endif
					    vrf_id,
					    port);
					if ((locked_stcb != NULL) && (locked_stcb != stcb)) {
						SCTP_TCB_UNLOCK(locked_stcb);
					}
					if (stcb != NULL) {
						SCTP_TCB_LOCK_ASSERT(stcb);
					}
				} else {
					ret_buf = NULL;
				}
				if (linp != NULL) {
					SCTP_ASOC_CREATE_UNLOCK(linp);
				}
				if (ret_buf == NULL) {
					if (stcb != NULL) {
						SCTP_TCB_UNLOCK(stcb);
					}
					SCTPDBG(SCTP_DEBUG_INPUT3,
					    "GAK, null buffer\n");
					*offset = length;
					return (NULL);
				}
				/* if AUTH skipped, see if it verified... */
				if (auth_skipped) {
					got_auth = 1;
					auth_skipped = 0;
				}
				/* Restart the timer if we have pending data */
				TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
					if (chk->whoTo != NULL) {
						break;
					}
				}
				if (chk != NULL) {
					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
				}
			}
			break;
		case SCTP_COOKIE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE_ACK, stcb %p\n", (void *)stcb);
			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
				break;
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else if (stcb) {
#if defined(__APPLE__) && !defined(__Userspace__)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined(__APPLE__) && !defined(__Userspace__)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
			}
			if ((netp != NULL) && (*netp != NULL)) {
				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
			}
			break;
		case SCTP_ECN_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_ECHO\n");
			if (stcb == NULL) {
				break;
			}
			if (stcb->asoc.ecn_supported == 0) {
				goto unknown_chunk;
			}
			if ((chk_length != sizeof(struct sctp_ecne_chunk)) &&
			    (chk_length != sizeof(struct old_sctp_ecne_chunk))) {
				break;
			}
			sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, stcb);
			ecne_seen = 1;
			break;
		case SCTP_ECN_CWR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN_CWR\n");
			if (stcb == NULL) {
				break;
			}
			if (stcb->asoc.ecn_supported == 0) {
				goto unknown_chunk;
			}
			if (chk_length != sizeof(struct sctp_cwr_chunk)) {
				break;
			}
			sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN_COMPLETE, stcb %p\n", (void *)stcb);
			/* must be first and only chunk */
			if ((num_chunks > 1) ||
			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
				*offset = length;
				return (stcb);
			}
			if ((chk_length == sizeof(struct sctp_shutdown_complete_chunk)) &&
			    (stcb != NULL) && (netp != NULL) && (*netp != NULL)) {
				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
				    stcb, *netp);
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_ASCONF:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
			if (stcb != NULL) {
				if (stcb->asoc.asconf_supported == 0) {
					goto unknown_chunk;
				}
				sctp_handle_asconf(m, *offset, src,
				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
				asconf_cnt++;
			}
			break;
		case SCTP_ASCONF_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF_ACK\n");
			if (stcb == NULL) {
				break;
			}
			if (stcb->asoc.asconf_supported == 0) {
				goto unknown_chunk;
			}
			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
				break;
			}
			if ((netp != NULL) && (*netp != NULL)) {
				/* He's alive so give him credit */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf_ack(m, *offset,
				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
				if (abort_no_unlock)
					return (NULL);
			}
			break;
		case SCTP_FORWARD_CUM_TSN:
		case SCTP_IFORWARD_CUM_TSN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "%s\n",
			    ch->chunk_type == SCTP_FORWARD_CUM_TSN ? "FORWARD_TSN" : "I_FORWARD_TSN");
			if (stcb == NULL) {
				break;
			}
			if (stcb->asoc.prsctp_supported == 0) {
				goto unknown_chunk;
			}
			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
				break;
			}
			if (((stcb->asoc.idata_supported == 1) && (ch->chunk_type == SCTP_FORWARD_CUM_TSN)) ||
			    ((stcb->asoc.idata_supported == 0) && (ch->chunk_type == SCTP_IFORWARD_CUM_TSN))) {
				if (ch->chunk_type == SCTP_FORWARD_CUM_TSN) {
					SCTP_SNPRINTF(msg, sizeof(msg), "%s", "FORWARD-TSN chunk received when I-FORWARD-TSN was negotiated");
				} else {
					SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-FORWARD-TSN chunk received when FORWARD-TSN was negotiated");
				}
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				*offset = length;
				return (NULL);
			}
			*fwd_tsn_seen = 1;
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
#if defined(__APPLE__) && !defined(__Userspace__)
				so = SCTP_INP_SO(inp);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
#if defined(__APPLE__) && !defined(__Userspace__)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
				*offset = length;
				return (NULL);
			}
			/*
			 * For sending a SACK this looks like DATA
			 * chunks.
			 */
			stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
			abort_flag = 0;
			sctp_handle_forward_tsn(stcb,
			    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
			if (abort_flag) {
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_STREAM_RESET:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
			if (stcb == NULL) {
				break;
			}
			if (stcb->asoc.reconfig_supported == 0) {
				goto unknown_chunk;
			}
			if (chk_length < sizeof(struct sctp_stream_reset_tsn_req)) {
				break;
			}
			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
				/* stop processing */
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_PACKET_DROPPED:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
			if (stcb == NULL) {
				break;
			}
			if (stcb->asoc.pktdrop_supported == 0) {
				goto unknown_chunk;
			}
			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
				break;
			}
			if ((netp != NULL) && (*netp != NULL)) {
				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
				    stcb, *netp,
				    min(chk_length, contiguous));
			}
			break;
		case SCTP_AUTHENTICATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
			if (stcb == NULL) {
				/* save the first AUTH for later processing */
				if (auth_skipped == 0) {
					auth_offset = *offset;
					auth_len = chk_length;
					auth_skipped = 1;
				}
				/* skip this chunk (temporarily) */
				break;
			}
			if (stcb->asoc.auth_supported == 0) {
				goto unknown_chunk;
			}
			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
			    (chk_length > (sizeof(struct sctp_auth_chunk) +
			    SCTP_AUTH_DIGEST_LEN_MAX))) {
				/* Its not ours */
				*offset = length;
				return (stcb);
			}
			if (got_auth == 1) {
				/* skip this chunk... it's already auth'd */
				break;
			}
			got_auth = 1;
			if (sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;
		default:
	unknown_chunk:
			/* it's an unknown chunk! */
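			/*
			 * The two high-order bits of the chunk type govern the
			 * handling of an unrecognized chunk (RFC 4960, Section 3.2):
			 * if 0x40 is set, report it back in an "Unrecognized Chunk
			 * Type" error cause; if 0x80 is clear, stop processing the
			 * rest of the packet, otherwise skip this chunk and continue.
			 */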
			if ((ch->chunk_type & 0x40) &&
			    (stcb != NULL) &&
			    (SCTP_GET_STATE(stcb) != SCTP_STATE_EMPTY) &&
			    (SCTP_GET_STATE(stcb) != SCTP_STATE_INUSE) &&
			    (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
				struct sctp_gen_error_cause *cause;
				int len;

				op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
				    0, M_NOWAIT, 1, MT_DATA);
				if (op_err != NULL) {
					len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
					cause = mtod(op_err, struct sctp_gen_error_cause *);
					cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
					SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
					if (SCTP_BUF_NEXT(op_err) != NULL) {
#ifdef SCTP_MBUF_LOGGING
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
							sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
						}
#endif
						sctp_queue_op_err(stcb, op_err);
					} else {
						sctp_m_freem(op_err);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			} /* else skip this bad chunk and continue... */
			break;
		} /* switch (ch->chunk_type) */
	next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			*offset = length;
			return (stcb);
		}
	} /* while */
	if ((asconf_cnt > 0) && (stcb != NULL)) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}

/*
 * common input chunk processing (v4 and v6)
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_chunkhdr *ch,
    uint8_t compute_crc,
    uint8_t ecn_bits,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
    uint32_t vrf_id, uint16_t port)
{
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm, *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	int un_sent;
	int cnt_ctrl_ready = 0;
	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
	struct sctp_tcb *stcb = NULL;
	struct sctp_nets *net = NULL;
#if defined(__Userspace__)
	struct socket *upcall_socket = NULL;
#endif

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif
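	/*
	 * Verify the CRC32c checksum in software unless the caller already
	 * validated it (compute_crc == 0, e.g. NIC offload or loopback).
	 * On a mismatch the packet is dropped; if it maps to an existing
	 * association, a PACKET-DROPPED report may be sent to the peer first.
	 */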
	if (compute_crc != 0) {
		uint32_t check, calc_check;

		check = sh->checksum;
		sh->checksum = 0;
		calc_check = sctp_calculate_cksum(m, iphlen);
		sh->checksum = check;
		if (calc_check != check) {
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
			    calc_check, check, (void *)m, length, iphlen);
			stcb = sctp_findassociation_addr(m, offset, src, dst,
			    sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu, true);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
			if (net != NULL) {
				net->flowtype = mflowtype;
				net->flowid = mflowid;
			}
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			if ((inp != NULL) && (stcb != NULL)) {
				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
			} else if ((inp != NULL) && (stcb == NULL)) {
				inp_decr = inp;
			}
			SCTP_STAT_INCR(sctps_badsum);
			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
			goto out;
		}
	}
	/* Destination port of 0 is illegal, based on RFC4960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	stcb = sctp_findassociation_addr(m, offset, src, dst,
	    sh, ch, &inp, &net, vrf_id);
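	/*
	 * The peer's UDP encapsulation port may have changed.  Switching
	 * encapsulation on or off changes the usable path MTU by the size of
	 * a UDP header, so adjust net->mtu (and, if needed, the association's
	 * path MTU) before remembering the new port.
	 */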
#if defined(INET) || defined(INET6)
	if ((ch->chunk_type != SCTP_INITIATION) &&
	    (net != NULL) && (net->port != port)) {
		if (net->port == 0) {
			/* UDP encapsulation turned on. */
			net->mtu -= sizeof(struct udphdr);
			if (stcb->asoc.smallest_mtu > net->mtu) {
				sctp_pathmtu_adjustment(stcb, net->mtu, true);
			}
		} else if (port == 0) {
			/* UDP encapsulation turned off. */
			net->mtu += sizeof(struct udphdr);
			/* XXX Update smallest_mtu */
		}
		net->port = port;
	}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (net != NULL) {
		net->flowtype = mflowtype;
		net->flowid = mflowid;
	}
#endif
	if (inp == NULL) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
		SCTP_STAT_INCR(sctps_noport);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
			goto out;
		}
#endif
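		/*
		 * No endpoint owns this packet, so treat it as out of the
		 * blue (RFC 4960, Section 8.4): answer a SHUTDOWN-ACK with a
		 * SHUTDOWN-COMPLETE, silently drop a SHUTDOWN-COMPLETE, and
		 * for anything else except an ABORT send an ABORT, subject
		 * to the sctp_blackhole sysctl.
		 */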
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
			sctp_send_shutdown_complete2(src, dst, sh,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, fibnum,
#endif
			    vrf_id, port);
			goto out;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
			goto out;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
			    (ch->chunk_type != SCTP_INIT))) {
				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
				    "Out of the blue");
				sctp_send_abort(m, iphlen, src, dst,
				    sh, 0, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
				    mflowtype, mflowid, fibnum,
#endif
				    vrf_id, port);
			}
		}
		goto out;
	} else if (stcb == NULL) {
		inp_decr = inp;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    (void *)m, iphlen, offset, length, (void *)stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    (void *)stcb, stcb->asoc.state);
		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count up when the
			 * assoc was aborted and the timer is clearing out
			 * the assoc; we should NOT respond to any packet.
			 * It's OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			stcb = NULL;
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, inp->fibnum,
#endif
			    vrf_id, port);
			goto out;
		}
	}
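	/*
	 * For the userspace stack, take a reference on the socket (or on its
	 * listening head) now, so a read/write upcall can be delivered once
	 * after this packet has been fully processed.
	 */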
#if defined(__Userspace__)
	if ((stcb != NULL) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (stcb->sctp_socket != NULL)) {
		if (stcb->sctp_socket->so_head != NULL) {
			upcall_socket = stcb->sctp_socket->so_head;
		} else {
			upcall_socket = stcb->sctp_socket;
		}
		SOCK_LOCK(upcall_socket);
		soref(upcall_socket);
		SOCK_UNLOCK(upcall_socket);
	}
#endif
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length,
		    src, dst, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen,
#if defined(__FreeBSD__) && !defined(__Userspace__)
		    mflowtype, mflowid, fibnum,
#endif
		    vrf_id, port);
		if (stcb) {
			/* This covers us if the cookie-echo was there
			 * and it changes our INP.
			 */
			inp = stcb->sctp_ep;
#if defined(INET) || defined(INET6)
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu, true);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */
		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto out;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, NULL, m, NULL, sh);
#endif
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, fibnum,
#endif
			    vrf_id, port);
			goto out;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
			SCTP_STAT_INCR(sctps_badvtag);
			goto out;
		}
	}
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SCTP_PROBE5(receive, NULL, stcb, m, stcb, sh);
#endif
	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out;
	}
#if defined(__Userspace__)
	if ((upcall_socket == NULL) &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
	    (stcb->sctp_socket != NULL)) {
		if (stcb->sctp_socket->so_head != NULL) {
			upcall_socket = stcb->sctp_socket->so_head;
		} else {
			upcall_socket = stcb->sctp_socket;
		}
		SOCK_LOCK(upcall_socket);
		soref(upcall_socket);
		SOCK_UNLOCK(upcall_socket);
	}
#endif
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */
	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
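		/*
		 * COOKIE_ECHOED: data with a valid tag implies the COOKIE-ACK
		 * was lost, so synthesize it.  COOKIE_WAIT: data during setup
		 * is treated as out of the blue.  Only OPEN and SHUTDOWN_SENT
		 * fall through to sctp_process_data(); everything else drops
		 * the rest of the packet.
		 */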
		switch (SCTP_GET_STATE(stcb)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * Data arriving with a valid tag in this state
			 * implies the COOKIE-ACK was lost; act as if it
			 * had arrived.
			 */
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			SCTP_SNPRINTF(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
#if defined(__FreeBSD__) && !defined(__Userspace__)
			    mflowtype, mflowid, inp->fibnum,
#endif
			    vrf_id, port);
			goto out;
			/*sa_ignore NOTREACHED*/
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			goto out;
			/*sa_ignore NOTREACHED*/
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			stcb = NULL;
			goto out;
		}
		if (retval == 0) {
			data_processed = 1;
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/* take care of ecn */
	if ((data_processed == 1) &&
	    (stcb->asoc.ecn_supported == 1) &&
	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
		/* Yep, we need to add an ECNE */
		sctp_send_ecn_echo(stcb, net, high_tsn);
	}
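	/*
	 * When a FORWARD-TSN was processed but no new DATA arrived, still
	 * arrange for a SACK so the peer learns the new cumulative TSN.
	 */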
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap;
		uint32_t highest_tsn;

		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
		} else {
			highest_tsn = stcb->asoc.highest_tsn_inside_map;
		}
		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap);
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
	trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
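	/*
	 * Kick the output path if there is anything to send: queued ASCONFs,
	 * pending control chunks (beyond ECN-Echoes already on the queue),
	 * a pending stream-reset request, or unsent data while the peer
	 * window is open or nothing is in flight.
	 */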
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
	    cnt_ctrl_ready ||
	    stcb->asoc.trigger_reset ||
	    ((un_sent > 0) &&
	    (stcb->asoc.peers_rwnd > 0 || stcb->asoc.total_flight == 0))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	out:
	if (stcb != NULL) {
		SCTP_TCB_UNLOCK(stcb);
	}
#if defined(__Userspace__)
	if (upcall_socket != NULL) {
		if (upcall_socket->so_upcall != NULL) {
			if (soreadable(upcall_socket) ||
			    sowriteable(upcall_socket) ||
			    upcall_socket->so_error) {
				(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
			}
		}
		ACCEPT_LOCK();
		SOCK_LOCK(upcall_socket);
		sorele(upcall_socket);
	}
#endif
	if (inp_decr != NULL) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp_decr);
		SCTP_INP_DECR_REF(inp_decr);
		SCTP_INP_WUNLOCK(inp_decr);
	}
	return;
}

#ifdef INET
#if !defined(__Userspace__)
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;
	uint8_t compute_crc;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;
#endif
#if defined(__Userspace__)
	uint16_t port = 0;
#endif

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    m->m_pkthdr.rcvif->if_name,
	    m->m_pkthdr.rcvif->if_unit,
	    m->m_pkthdr.csum_flags);
#endif
#if defined(_WIN32) && !defined(__Userspace__)
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
	    m->m_pkthdr.len,
	    m->m_pkthdr.rcvif->if_xname,
	    m->m_pkthdr.csum_flags);
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
#endif
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	src.sin_len = sizeof(struct sockaddr_in);
#endif
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
	dst.sin_len = sizeof(struct sockaddr_in);
#endif
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
#if defined(_WIN32) && !defined(__Userspace__)
	NTOHS(ip->ip_len);
#endif
#if defined(__linux__) || (defined(_WIN32) && defined(__Userspace__))
	ip->ip_len = ntohs(ip->ip_len);
#endif
#if defined(__Userspace__)
#if defined(__linux__) || defined(_WIN32)
	length = ip->ip_len;
#else
	length = ip->ip_len + iphlen;
#endif
#elif defined(__FreeBSD__)
	length = ntohs(ip->ip_len);
#elif defined(__APPLE__)
	length = ip->ip_len + iphlen;
#else
	length = ip->ip_len;
#endif
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
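	/*
	 * Decide whether the CRC32c must be computed in software: FreeBSD
	 * trusts a CSUM_SCTP_VALID flag set by the NIC, other platforms may
	 * skip the check for loopback traffic when sctp_no_csum_on_loopback
	 * is enabled.
	 */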
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#else
	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
	    (SCTP_IS_IT_LOOPBACK(m)))) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
#endif
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
	sctp_common_input_processing(&m, iphlen, offset, length,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst,
	    sh, ch,
	    compute_crc,
	    ecn_bits,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    mflowtype, mflowid, fibnum,
#endif
	    vrf_id, port);
	out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}

#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SCTP_MCORE_INPUT) && defined(SMP)
extern int *sctp_cpuarry;
#endif
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
int
sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
{
	struct mbuf *m;
	int off;

	m = *mp;
	off = *offp;
#else
void
sctp_input(struct mbuf *m, int off)
{
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SCTP_MCORE_INPUT) && defined(SMP)
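	/*
	 * With multiple cores, pin each flow to one core: reuse the flow id
	 * supplied by the lower layers, or derive one from the verification
	 * tag and port numbers, and queue the packet to the chosen core.
	 */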
	if (mp_ncpus > 1) {
		struct ip *ip;
		struct sctphdr *sh;
		int offset;
		int cpu_to_use;
		uint32_t flowid, tag;

		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			flowid = m->m_pkthdr.flowid;
		} else {
			/*
			 * No flow id built by lower layers; create one.
			 */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
					return (IPPROTO_DONE);
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			tag = htonl(sh->v_tag);
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
		}
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return (IPPROTO_DONE);
	}
#endif
#endif
	sctp_input_with_port(m, off, 0);
#if defined(__FreeBSD__) && !defined(__Userspace__)
	return (IPPROTO_DONE);
#endif
}
#endif
#endif