430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
- diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
- index 3a3b30ac2a75..9e0745cafbd8 100644
- --- a/Documentation/sysrq.txt
- +++ b/Documentation/sysrq.txt
- @@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
- On other - If you know of the key combos for other architectures, please
- let me know so I can add them to this section.
-
- -On all - write a character to /proc/sysrq-trigger. e.g.:
- -
- +On all - write a character to /proc/sysrq-trigger, e.g.:
- echo t > /proc/sysrq-trigger
-
- +On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
- + echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
- + Send an ICMP echo request with this pattern plus the particular
- + SysRq command key. Example:
- + # ping -c1 -s57 -p0102030468
- + will trigger the SysRq-H (help) command.
- +
- +
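- + The last byte of the pattern is simply the ASCII code of the
- + command key (0x68 = 'h'), so a pattern for any key can be
- + derived from the cookie, e.g. in shell:
- +
- + key=h
- + pattern=01020304$(printf '%02x' "'$key")
- + ping -c1 -s57 -p"$pattern" <target>
- +
- + where <target> is the host that has icmp_echo_sysrq enabled;
- + this should trigger SysRq-<key> there.
- +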
- * What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b' - Will immediately reboot the system without syncing or unmounting
- diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
- new file mode 100644
- index 000000000000..6f2aeabf7faa
- --- /dev/null
- +++ b/Documentation/trace/histograms.txt
- @@ -0,0 +1,186 @@
- + Using the Linux Kernel Latency Histograms
- +
- +
- +This document gives a short explanation of how to enable, configure and use
- +latency histograms. Latency histograms are primarily relevant in the
- +context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
- +and are used in the quality management of the Linux real-time
- +capabilities.
- +
- +
- +* Purpose of latency histograms
- +
- +A latency histogram continuously accumulates the frequencies of latency
- +data. There are two types of histograms:
- +- potential sources of latencies
- +- effective latencies
- +
- +
- +* Potential sources of latencies
- +
- +Potential sources of latencies are code segments where interrupts,
- +preemption or both are disabled (aka critical sections). To create
- +histograms of potential sources of latency, the kernel stores the time
- +stamp at the start of a critical section, determines the time elapsed
- +when the end of the section is reached, and increments the frequency
- +counter of that latency value - irrespective of whether any concurrently
- +running process is affected by latency or not.
- +- Configuration items (in the Kernel hacking/Tracers submenu)
- + CONFIG_INTERRUPT_OFF_HIST
- + CONFIG_PREEMPT_OFF_HIST
- +
- +
- +* Effective latencies
- +
- +Effective latencies actually occur during the wakeup of a process. To
- +determine effective latencies, the kernel stores the time stamp when a
- +process is scheduled to be woken up, and determines the duration of the
- +wakeup time shortly before control is passed over to this process. Note
- +that the apparent latency in user space may be somewhat longer, since the
- +process may be interrupted after control is passed over to it but before
- +the execution in user space takes place. Simply measuring the interval
- +between enqueuing and wakeup may also not be appropriate in cases when a
- +process is scheduled as a result of a timer expiration. The timer may have
- +missed its deadline, e.g. due to disabled interrupts, but this latency
- +would not be registered. Therefore, the offsets of missed timers are
- +recorded in a separate histogram. If both wakeup latency and missed timer
- +offsets are configured and enabled, a third histogram may be enabled that
- +records the overall latency as a sum of the timer latency, if any, and the
- +wakeup latency. This histogram is called "timerandwakeup".
- +- Configuration items (in the Kernel hacking/Tracers submenu)
- + CONFIG_WAKEUP_LATENCY_HIST
- + CONFIG_MISSED_TIMER_OFFSETS_HIST
- +
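- +By way of illustration, the same quantity can be approximated from
- +user space with a small cyclictest-like program (a sketch, not part
- +of the kernel):
- +
- +#include <stdio.h>
- +#include <time.h>
- +
- +int main(void)
- +{
- +	struct timespec next, now;
- +	long long d, max_us = 0;
- +	int i;
- +
- +	clock_gettime(CLOCK_MONOTONIC, &next);
- +	for (i = 0; i < 1000; i++) {
- +		next.tv_nsec += 1000000;	/* arm a timer 1 ms ahead */
- +		if (next.tv_nsec >= 1000000000) {
- +			next.tv_nsec -= 1000000000;
- +			next.tv_sec++;
- +		}
- +		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
- +		clock_gettime(CLOCK_MONOTONIC, &now);
- +		/* timer offset, if any, plus wakeup latency */
- +		d = (now.tv_sec - next.tv_sec) * 1000000000LL +
- +			(now.tv_nsec - next.tv_nsec);
- +		if (d / 1000 > max_us)
- +			max_us = d / 1000;
- +	}
- +	printf("max latency: %lld us\n", max_us);
- +	return 0;
- +}
- +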
- +
- +* Usage
- +
- +The interface to the administration of the latency histograms is located
- +in the debugfs file system. To mount it, either enter
- +
- +mount -t sysfs nodev /sys
- +mount -t debugfs nodev /sys/kernel/debug
- +
- +from shell command line level, or add
- +
- +nodev /sys sysfs defaults 0 0
- +nodev /sys/kernel/debug debugfs defaults 0 0
- +
- +to the file /etc/fstab. All latency histogram related files are then
- +available in the directory /sys/kernel/debug/tracing/latency_hist. A
- +particular histogram type is enabled by writing non-zero to the related
- +variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
- +Select "preemptirqsoff" for the histograms of potential sources of
- +latencies and "wakeup" for histograms of effective latencies etc. The
- +histogram data - one per CPU - are available in the files
- +
- +/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
- +/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
- +/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
- +/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
- +/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
- +/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
- +/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
- +
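- +For example, to enable the wakeup histogram and inspect the data of
- +the first CPU (assuming debugfs is mounted as described above):
- +
- +echo 1 >/sys/kernel/debug/tracing/latency_hist/enable/wakeup
- +cat /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0
- +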
- +The histograms are reset by writing non-zero to the file "reset" in a
- +particular latency directory. To reset all latency data, use
- +
- +#!/bin/sh
- +
- +TRACINGDIR=/sys/kernel/debug/tracing
- +HISTDIR=$TRACINGDIR/latency_hist
- +
- +if test -d $HISTDIR
- +then
- + cd $HISTDIR
- + for i in `find . | grep /reset$`
- + do
- + echo 1 >$i
- + done
- +fi
- +
- +
- +* Data format
- +
- +Latency data are stored with a resolution of one microsecond. The
- +maximum latency is 10,240 microseconds. The data are only valid if the
- +overflow register is empty. Every output line contains the latency in
- +microseconds in the first column and the number of samples in the second
- +column. To display only lines with a positive latency count, use, for
- +example,
- +
- +grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
- +
- +#Minimum latency: 0 microseconds.
- +#Average latency: 0 microseconds.
- +#Maximum latency: 25 microseconds.
- +#Total samples: 3104770694
- +#There are 0 samples greater or equal than 10240 microseconds
- +#usecs samples
- + 0 2984486876
- + 1 49843506
- + 2 58219047
- + 3 5348126
- + 4 2187960
- + 5 3388262
- + 6 959289
- + 7 208294
- + 8 40420
- + 9 4485
- + 10 14918
- + 11 18340
- + 12 25052
- + 13 19455
- + 14 5602
- + 15 969
- + 16 47
- + 17 18
- + 18 14
- + 19 1
- + 20 3
- + 21 2
- + 22 5
- + 23 2
- + 25 1
- +
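- +To compute, for example, the fraction of samples at or above 10
- +microseconds from such an output, something like
- +
- +awk '!/^#/ { total += $2; if ($1 >= 10) tail += $2 }
- +	END { printf "%.6f%%\n", 100 * tail / total }' \
- +	/sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
- +
- +can be used.
- +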
- +
- +* Wakeup latency of a selected process
- +
- +To only collect wakeup latency data of a particular process, write the
- +PID of the requested process to
- +
- +/sys/kernel/debug/tracing/latency_hist/wakeup/pid
- +
- +PIDs are not considered if this variable is set to 0.
- +
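- +For example, to record only the wakeup latencies of a single running
- +cyclictest instance (assuming exactly one is running):
- +
- +echo $(pidof cyclictest) >/sys/kernel/debug/tracing/latency_hist/wakeup/pid
- +
- +Writing 0 re-enables recording for all processes.
- +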
- +
- +* Details of the process with the highest wakeup latency so far
- +
- +Selected data of the process that suffered from the highest wakeup
- +latency that occurred on a particular CPU are available in the file
- +
- +/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
- +
- +In addition, other relevant system data at the time when the
- +latency occurred are given.
- +
- +The format of the data is (all in one line):
- +<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
- +<- <PID> <Priority> <Command> <Timestamp>
- +
- +The value of <Timeroffset> is only relevant in the combined timer
- +and wakeup latency recording. In the wakeup recording, it is
- +always 0; in the missed_timer_offsets recording, it is the same
- +as <Latency>.
- +
- +When retrospectively searching for the origin of a latency while
- +tracing was not enabled, it may be helpful to know the name and
- +some basic data of the task that (finally) switched to the late
- +real-time task. In addition to the victim's data, the data of the
- +possible culprit are therefore also displayed after the "<-"
- +symbol.
- +
- +Finally, the timestamp of when the latency occurred, given as
- +<seconds>.<microseconds> since the most recent system boot, is
- +provided.
- +
- +These data are also reset when the wakeup histogram is reset.
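- +
- +Given the format above, the worst-case entry can be summarized with,
- +for example,
- +
- +awk '{ print $3 " us for " $5 " (PID " $1 "), culprit " $9 }' \
- +	/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPU0
- +
- +provided that the command names contain no spaces.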
- diff --git a/MAINTAINERS b/MAINTAINERS
- index 63cefa62324c..be0ea1e5c4cc 100644
- --- a/MAINTAINERS
- +++ b/MAINTAINERS
- @@ -5196,6 +5196,23 @@ F: fs/fuse/
- F: include/uapi/linux/fuse.h
- F: Documentation/filesystems/fuse.txt
-
- +FUTEX SUBSYSTEM
- +M: Thomas Gleixner <tglx@linutronix.de>
- +M: Ingo Molnar <mingo@redhat.com>
- +R: Peter Zijlstra <peterz@infradead.org>
- +R: Darren Hart <dvhart@infradead.org>
- +L: linux-kernel@vger.kernel.org
- +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
- +S: Maintained
- +F: kernel/futex.c
- +F: kernel/futex_compat.c
- +F: include/asm-generic/futex.h
- +F: include/linux/futex.h
- +F: include/uapi/linux/futex.h
- +F: tools/testing/selftests/futex/
- +F: tools/perf/bench/futex*
- +F: Documentation/*futex*
- +
- FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit)
- M: Rik Faith <faith@cs.unc.edu>
- L: linux-scsi@vger.kernel.org
- diff --git a/arch/Kconfig b/arch/Kconfig
- index 659bdd079277..099fc0f5155e 100644
- --- a/arch/Kconfig
- +++ b/arch/Kconfig
- @@ -9,6 +9,7 @@ config OPROFILE
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
- + depends on !PREEMPT_RT_FULL
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
- @@ -52,6 +53,7 @@ config KPROBES
- config JUMP_LABEL
- bool "Optimize very unlikely/likely branches"
- depends on HAVE_ARCH_JUMP_LABEL
- + depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
- help
- This option enables a transparent branch optimization that
- makes certain almost-always-true or almost-always-false branch
- diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
- index b5d529fdffab..5715844e83e3 100644
- --- a/arch/arm/Kconfig
- +++ b/arch/arm/Kconfig
- @@ -36,7 +36,7 @@ config ARM
- select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
- select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_HARDENED_USERCOPY
- - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
- + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
- select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
- select HAVE_ARCH_MMAP_RND_BITS if MMU
- select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
- @@ -75,6 +75,7 @@ config ARM
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
- + select HAVE_PREEMPT_LAZY
- select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_SYSCALL_TRACEPOINTS
- diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
- index e53638c8ed8a..6095a1649865 100644
- --- a/arch/arm/include/asm/irq.h
- +++ b/arch/arm/include/asm/irq.h
- @@ -22,6 +22,8 @@
- #endif
-
- #ifndef __ASSEMBLY__
- +#include <linux/cpumask.h>
- +
- struct irqaction;
- struct pt_regs;
- extern void migrate_irqs(void);
- diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
- index 12ebfcc1d539..c962084605bc 100644
- --- a/arch/arm/include/asm/switch_to.h
- +++ b/arch/arm/include/asm/switch_to.h
- @@ -3,6 +3,13 @@
-
- #include <linux/thread_info.h>
-
- +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
- +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
- +#else
- +static inline void
- +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
- +#endif
- +
- /*
- * For v7 SMP cores running a preemptible kernel we may be pre-empted
- * during a TLB maintenance operation, so execute an inner-shareable dsb
- @@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
- #define switch_to(prev,next,last) \
- do { \
- __complete_pending_tlbi(); \
- + switch_kmaps(prev, next); \
- last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
- } while (0)
-
- diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
- index 776757d1604a..1f36a4eccc72 100644
- --- a/arch/arm/include/asm/thread_info.h
- +++ b/arch/arm/include/asm/thread_info.h
- @@ -49,6 +49,7 @@ struct cpu_context_save {
- struct thread_info {
- unsigned long flags; /* low level flags */
- int preempt_count; /* 0 => preemptable, <0 => bug */
- + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
- __u32 cpu; /* cpu */
- @@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- #define TIF_SYSCALL_TRACE 4 /* syscall trace active */
- #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
- -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
- +#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
- +#define TIF_NEED_RESCHED_LAZY 7
-
- #define TIF_NOHZ 12 /* in adaptive nohz mode */
- #define TIF_USING_IWMMXT 17
- @@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
- +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- @@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
- * Change these and you break ASM code in entry-common.S
- */
- #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- - _TIF_NOTIFY_RESUME | _TIF_UPROBE)
- + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- + _TIF_NEED_RESCHED_LAZY)
-
- #endif /* __KERNEL__ */
- #endif /* __ASM_ARM_THREAD_INFO_H */
- diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
- index 608008229c7d..3866da3f7bb7 100644
- --- a/arch/arm/kernel/asm-offsets.c
- +++ b/arch/arm/kernel/asm-offsets.c
- @@ -65,6 +65,7 @@ int main(void)
- BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
- index 9f157e7c51e7..468e224d76aa 100644
- --- a/arch/arm/kernel/entry-armv.S
- +++ b/arch/arm/kernel/entry-armv.S
- @@ -220,11 +220,18 @@ __irq_svc:
-
- #ifdef CONFIG_PREEMPT
- ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- - ldr r0, [tsk, #TI_FLAGS] @ get flags
- teq r8, #0 @ if preempt count != 0
- + bne 1f @ return from exception
- + ldr r0, [tsk, #TI_FLAGS] @ get flags
- + tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
- + blne svc_preempt @ preempt!
- +
- + ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
- + teq r8, #0 @ if preempt lazy count != 0
- movne r0, #0 @ force flags to 0
- - tst r0, #_TIF_NEED_RESCHED
- + tst r0, #_TIF_NEED_RESCHED_LAZY
- blne svc_preempt
- +1:
- #endif
-
- svc_exit r5, irq = 1 @ return from exception
- @@ -239,8 +246,14 @@ svc_preempt:
- 1: bl preempt_schedule_irq @ irq en/disable is done inside
- ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
- tst r0, #_TIF_NEED_RESCHED
- + bne 1b
- + tst r0, #_TIF_NEED_RESCHED_LAZY
- reteq r8 @ go again
- - b 1b
- + ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
- + teq r0, #0 @ if preempt lazy count != 0
- + beq 1b
- + ret r8 @ go again
- +
- #endif
-
- __und_fault:
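
Taken together, the changes to __irq_svc and svc_preempt above implement a two-level check at IRQ exit. As a C-level sketch of the assembly's logic (simplified; ti stands for the current thread_info, and svc_preempt for the call into preempt_schedule_irq):

	/* Sketch of the IRQ-exit decision implemented in entry-armv.S above. */
	if (ti->preempt_count == 0) {
		if (ti->flags & _TIF_NEED_RESCHED)
			svc_preempt();		/* a hard request is always honoured */
		else if (ti->preempt_lazy_count == 0 &&
			 (ti->flags & _TIF_NEED_RESCHED_LAZY))
			svc_preempt();		/* lazy requests wait for lazy sections to end */
	}

A nonzero preempt_lazy_count thus delays only lazy rescheduling; tasks flagged with plain TIF_NEED_RESCHED (real-time tasks, for example) still preempt immediately.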
- diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
- index 10c3283d6c19..8872937862cc 100644
- --- a/arch/arm/kernel/entry-common.S
- +++ b/arch/arm/kernel/entry-common.S
- @@ -36,7 +36,9 @@ ret_fast_syscall:
- UNWIND(.cantunwind )
- disable_irq_notrace @ disable interrupts
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
- + tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
- + bne fast_work_pending
- + tst r1, #_TIF_SECCOMP
- bne fast_work_pending
-
- /* perform architecture specific actions before user return */
- @@ -62,8 +64,11 @@ ret_fast_syscall:
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
- disable_irq_notrace @ disable interrupts
- ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- - tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
- + tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
- + bne do_slower_path
- + tst r1, #_TIF_SECCOMP
- beq no_work_pending
- +do_slower_path:
- UNWIND(.fnend )
- ENDPROC(ret_fast_syscall)
-
- diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
- index 69bda1a5707e..1f665acaa6a9 100644
- --- a/arch/arm/kernel/patch.c
- +++ b/arch/arm/kernel/patch.c
- @@ -15,7 +15,7 @@ struct patch {
- unsigned int insn;
- };
-
- -static DEFINE_SPINLOCK(patch_lock);
- +static DEFINE_RAW_SPINLOCK(patch_lock);
-
- static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
- __acquires(&patch_lock)
- @@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
- return addr;
-
- if (flags)
- - spin_lock_irqsave(&patch_lock, *flags);
- + raw_spin_lock_irqsave(&patch_lock, *flags);
- else
- __acquire(&patch_lock);
-
- @@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
- clear_fixmap(fixmap);
-
- if (flags)
- - spin_unlock_irqrestore(&patch_lock, *flags);
- + raw_spin_unlock_irqrestore(&patch_lock, *flags);
- else
- __release(&patch_lock);
- }
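
This is the first of many identical conversions in this series (unwind_lock below, and the boot_lock in each platsmp.c): on PREEMPT_RT, a plain spinlock_t becomes a sleeping, rtmutex-based lock, so any lock taken in a context that must never sleep, such as code patching or early secondary-CPU bring-up, has to become a raw_spinlock_t, which keeps true spinning semantics. The pattern, in outline:

	/* RT conversion pattern: keep a real spinning lock for atomic contexts. */
	static DEFINE_RAW_SPINLOCK(some_lock);

	unsigned long flags;
	raw_spin_lock_irqsave(&some_lock, flags);
	/* short, genuinely atomic critical section */
	raw_spin_unlock_irqrestore(&some_lock, flags);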
- diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
- index 91d2d5b01414..750550098b59 100644
- --- a/arch/arm/kernel/process.c
- +++ b/arch/arm/kernel/process.c
- @@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
- }
-
- #ifdef CONFIG_MMU
- +/*
- + * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
- + * initialized by pgtable_page_ctor() then a coredump of the vector page will
- + * fail.
- + */
- +static int __init vectors_user_mapping_init_page(void)
- +{
- + struct page *page;
- + unsigned long addr = 0xffff0000;
- + pgd_t *pgd;
- + pud_t *pud;
- + pmd_t *pmd;
- +
- + pgd = pgd_offset_k(addr);
- + pud = pud_offset(pgd, addr);
- + pmd = pmd_offset(pud, addr);
- + page = pmd_page(*(pmd));
- +
- + pgtable_page_ctor(page);
- +
- + return 0;
- +}
- +late_initcall(vectors_user_mapping_init_page);
- +
- #ifdef CONFIG_KUSER_HELPERS
- /*
- * The vectors page is always readable from user space for the
- diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
- index 7b8f2141427b..96541e00b74a 100644
- --- a/arch/arm/kernel/signal.c
- +++ b/arch/arm/kernel/signal.c
- @@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- */
- trace_hardirqs_off();
- do {
- - if (likely(thread_flags & _TIF_NEED_RESCHED)) {
- + if (likely(thread_flags & (_TIF_NEED_RESCHED |
- + _TIF_NEED_RESCHED_LAZY))) {
- schedule();
- } else {
- if (unlikely(!user_mode(regs)))
- diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
- index 7dd14e8395e6..4cd7e3d98035 100644
- --- a/arch/arm/kernel/smp.c
- +++ b/arch/arm/kernel/smp.c
- @@ -234,8 +234,6 @@ int __cpu_disable(void)
- flush_cache_louis();
- local_flush_tlb_all();
-
- - clear_tasks_mm_cpumask(cpu);
- -
- return 0;
- }
-
- @@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
- pr_err("CPU%u: cpu didn't die\n", cpu);
- return;
- }
- +
- + clear_tasks_mm_cpumask(cpu);
- +
- pr_notice("CPU%u: shutdown\n", cpu);
-
- /*
- diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
- index 0bee233fef9a..314cfb232a63 100644
- --- a/arch/arm/kernel/unwind.c
- +++ b/arch/arm/kernel/unwind.c
- @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
- static const struct unwind_idx *__origin_unwind_idx;
- extern const struct unwind_idx __stop_unwind_idx[];
-
- -static DEFINE_SPINLOCK(unwind_lock);
- +static DEFINE_RAW_SPINLOCK(unwind_lock);
- static LIST_HEAD(unwind_tables);
-
- /* Convert a prel31 symbol to an absolute address */
- @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
- /* module unwind tables */
- struct unwind_table *table;
-
- - spin_lock_irqsave(&unwind_lock, flags);
- + raw_spin_lock_irqsave(&unwind_lock, flags);
- list_for_each_entry(table, &unwind_tables, list) {
- if (addr >= table->begin_addr &&
- addr < table->end_addr) {
- @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
- break;
- }
- }
- - spin_unlock_irqrestore(&unwind_lock, flags);
- + raw_spin_unlock_irqrestore(&unwind_lock, flags);
- }
-
- pr_debug("%s: idx = %p\n", __func__, idx);
- @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
- tab->begin_addr = text_addr;
- tab->end_addr = text_addr + text_size;
-
- - spin_lock_irqsave(&unwind_lock, flags);
- + raw_spin_lock_irqsave(&unwind_lock, flags);
- list_add_tail(&tab->list, &unwind_tables);
- - spin_unlock_irqrestore(&unwind_lock, flags);
- + raw_spin_unlock_irqrestore(&unwind_lock, flags);
-
- return tab;
- }
- @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
- if (!tab)
- return;
-
- - spin_lock_irqsave(&unwind_lock, flags);
- + raw_spin_lock_irqsave(&unwind_lock, flags);
- list_del(&tab->list);
- - spin_unlock_irqrestore(&unwind_lock, flags);
- + raw_spin_unlock_irqrestore(&unwind_lock, flags);
-
- kfree(tab);
- }
- diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
- index 19b5f5c1c0ff..82aa639e6737 100644
- --- a/arch/arm/kvm/arm.c
- +++ b/arch/arm/kvm/arm.c
- @@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
- * involves poking the GIC, which must be done in a
- * non-preemptible context.
- */
- - preempt_disable();
- + migrate_disable();
- kvm_pmu_flush_hwstate(vcpu);
- kvm_timer_flush_hwstate(vcpu);
- kvm_vgic_flush_hwstate(vcpu);
- @@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
- kvm_pmu_sync_hwstate(vcpu);
- kvm_timer_sync_hwstate(vcpu);
- kvm_vgic_sync_hwstate(vcpu);
- - preempt_enable();
- + migrate_enable();
- continue;
- }
-
- @@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
-
- kvm_vgic_sync_hwstate(vcpu);
-
- - preempt_enable();
- + migrate_enable();
-
- ret = handle_exit(vcpu, run, ret);
- }
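
The substitution here is deliberate: migrate_disable() keeps the vCPU thread pinned to its current CPU, which is what the GIC accesses require, but unlike preempt_disable() it leaves the section preemptible, so it does not create a long non-preemptible region on PREEMPT_RT.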
- diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
- index 98ffe1e62ad5..df9769ddece5 100644
- --- a/arch/arm/mach-exynos/platsmp.c
- +++ b/arch/arm/mach-exynos/platsmp.c
- @@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void)
- return (void __iomem *)(S5P_VA_SCU);
- }
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void exynos_secondary_init(unsigned int cpu)
- {
- @@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
- @@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
- @@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
-
- if (timeout == 0) {
- printk(KERN_ERR "cpu1 power enable failed");
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- return -ETIMEDOUT;
- }
- }
- @@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * calibrations, then wait for it to finish
- */
- fail:
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? ret : 0;
- }
- diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
- index 4b653a8cb75c..b03d5a922cb1 100644
- --- a/arch/arm/mach-hisi/platmcpm.c
- +++ b/arch/arm/mach-hisi/platmcpm.c
- @@ -61,7 +61,7 @@
-
- static void __iomem *sysctrl, *fabric;
- static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
- static u32 fabric_phys_addr;
- /*
- * [0]: bootwrapper physical address
- @@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
- if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
- return -EINVAL;
-
- - spin_lock_irq(&boot_lock);
- + raw_spin_lock_irq(&boot_lock);
-
- if (hip04_cpu_table[cluster][cpu])
- goto out;
- @@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
-
- out:
- hip04_cpu_table[cluster][cpu]++;
- - spin_unlock_irq(&boot_lock);
- + raw_spin_unlock_irq(&boot_lock);
-
- return 0;
- }
- @@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu)
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- hip04_cpu_table[cluster][cpu]--;
- if (hip04_cpu_table[cluster][cpu] == 1) {
- /* A power_up request went ahead of us. */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- return;
- } else if (hip04_cpu_table[cluster][cpu] > 1) {
- pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
- @@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu)
- }
-
- last_man = hip04_cluster_is_down(cluster);
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- if (last_man) {
- /* Since it's Cortex A15, disable L2 prefetching. */
- asm volatile(
- @@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu)
- cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
-
- count = TIMEOUT_MSEC / POLL_MSEC;
- - spin_lock_irq(&boot_lock);
- + raw_spin_lock_irq(&boot_lock);
- for (tries = 0; tries < count; tries++) {
- if (hip04_cpu_table[cluster][cpu])
- goto err;
- @@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
- data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
- if (data & CORE_WFI_STATUS(cpu))
- break;
- - spin_unlock_irq(&boot_lock);
- + raw_spin_unlock_irq(&boot_lock);
- /* Wait for clean L2 when the whole cluster is down. */
- msleep(POLL_MSEC);
- - spin_lock_irq(&boot_lock);
- + raw_spin_lock_irq(&boot_lock);
- }
- if (tries >= count)
- goto err;
- @@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu)
- goto err;
- if (hip04_cluster_is_down(cluster))
- hip04_set_snoop_filter(cluster, 0);
- - spin_unlock_irq(&boot_lock);
- + raw_spin_unlock_irq(&boot_lock);
- return 1;
- err:
- - spin_unlock_irq(&boot_lock);
- + raw_spin_unlock_irq(&boot_lock);
- return 0;
- }
- #endif
- diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
- index b4de3da6dffa..b52893319d75 100644
- --- a/arch/arm/mach-omap2/omap-smp.c
- +++ b/arch/arm/mach-omap2/omap-smp.c
- @@ -64,7 +64,7 @@ static const struct omap_smp_config omap5_cfg __initconst = {
- .startup_addr = omap5_secondary_startup,
- };
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void __iomem *omap4_get_scu_base(void)
- {
- @@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
- @@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * Update the AuxCoreBoot0 with boot state for secondary core.
- @@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return 0;
- }
- diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
- index 0875b99add18..18b6d98d2581 100644
- --- a/arch/arm/mach-prima2/platsmp.c
- +++ b/arch/arm/mach-prima2/platsmp.c
- @@ -22,7 +22,7 @@
-
- static void __iomem *clk_base;
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void sirfsoc_secondary_init(unsigned int cpu)
- {
- @@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- static const struct of_device_id clk_ids[] = {
- @@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
- /* make sure write buffer is drained */
- mb();
-
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
- @@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
- diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
- index 5494c9e0c909..e8ce157d3548 100644
- --- a/arch/arm/mach-qcom/platsmp.c
- +++ b/arch/arm/mach-qcom/platsmp.c
- @@ -46,7 +46,7 @@
-
- extern void secondary_startup_arm(void);
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- #ifdef CONFIG_HOTPLUG_CPU
- static void qcom_cpu_die(unsigned int cpu)
- @@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- static int scss_release_secondary(unsigned int cpu)
- @@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
- * set synchronisation state between this boot processor
- * and the secondary one
- */
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * Send the secondary CPU a soft interrupt, thereby causing
- @@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return ret;
- }
- diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
- index 8d1e2d551786..7fa56cc78118 100644
- --- a/arch/arm/mach-spear/platsmp.c
- +++ b/arch/arm/mach-spear/platsmp.c
- @@ -32,7 +32,7 @@ static void write_pen_release(int val)
- sync_cache_w(&pen_release);
- }
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
-
- @@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
- @@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * set synchronisation state between this boot processor
- * and the secondary one
- */
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
- @@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
- diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
- index ea5a2277ee46..b988e081ac79 100644
- --- a/arch/arm/mach-sti/platsmp.c
- +++ b/arch/arm/mach-sti/platsmp.c
- @@ -35,7 +35,7 @@ static void write_pen_release(int val)
- sync_cache_w(&pen_release);
- }
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void sti_secondary_init(unsigned int cpu)
- {
- @@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
- @@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * set synchronisation state between this boot processor
- * and the secondary one
- */
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
- @@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
- diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
- index f7861dc83182..ce47dfe25fb0 100644
- --- a/arch/arm/mm/fault.c
- +++ b/arch/arm/mm/fault.c
- @@ -433,6 +433,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
- if (addr < TASK_SIZE)
- return do_page_fault(addr, fsr, regs);
-
- + if (interrupts_enabled(regs))
- + local_irq_enable();
- +
- if (user_mode(regs))
- goto bad_area;
-
- @@ -500,6 +503,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
- static int
- do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- {
- + if (interrupts_enabled(regs))
- + local_irq_enable();
- +
- do_bad_area(addr, fsr, regs);
- return 0;
- }
- diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
- index d02f8187b1cc..542692dbd40a 100644
- --- a/arch/arm/mm/highmem.c
- +++ b/arch/arm/mm/highmem.c
- @@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
- return *ptep;
- }
-
- +static unsigned int fixmap_idx(int type)
- +{
- + return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
- +}
- +
- void *kmap(struct page *page)
- {
- might_sleep();
- @@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
-
- void *kmap_atomic(struct page *page)
- {
- + pte_t pte = mk_pte(page, kmap_prot);
- unsigned int idx;
- unsigned long vaddr;
- void *kmap;
- int type;
-
- - preempt_disable();
- + preempt_disable_nort();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
- @@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
-
- type = kmap_atomic_idx_push();
-
- - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
- + idx = fixmap_idx(type);
- vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
- /*
- @@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
- * in place, so the contained TLB flush ensures the TLB is updated
- * with the new mapping.
- */
- - set_fixmap_pte(idx, mk_pte(page, kmap_prot));
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = pte;
- +#endif
- + set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
- @@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr)
-
- if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx();
- - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
- + idx = fixmap_idx(type);
-
- if (cache_is_vivt())
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = __pte(0);
- +#endif
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(idx));
- - set_fixmap_pte(idx, __pte(0));
- #else
- (void) idx; /* to kill a warning */
- #endif
- + set_fixmap_pte(idx, __pte(0));
- kmap_atomic_idx_pop();
- } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
- /* this address was obtained through kmap_high_get() */
- kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
- }
- pagefault_enable();
- - preempt_enable();
- + preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
- void *kmap_atomic_pfn(unsigned long pfn)
- {
- + pte_t pte = pfn_pte(pfn, kmap_prot);
- unsigned long vaddr;
- int idx, type;
- struct page *page = pfn_to_page(pfn);
-
- - preempt_disable();
- + preempt_disable_nort();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
- + idx = fixmap_idx(type);
- vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
- #endif
- - set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = pte;
- +#endif
- + set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
- +#if defined CONFIG_PREEMPT_RT_FULL
- +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
- +{
- + int i;
- +
- + /*
- + * Clear @prev's kmap_atomic mappings
- + */
- + for (i = 0; i < prev_p->kmap_idx; i++) {
- + int idx = fixmap_idx(i);
- +
- + set_fixmap_pte(idx, __pte(0));
- + }
- + /*
- + * Restore @next_p's kmap_atomic mappings
- + */
- + for (i = 0; i < next_p->kmap_idx; i++) {
- + int idx = fixmap_idx(i);
- +
- + if (!pte_none(next_p->kmap_pte[i]))
- + set_fixmap_pte(idx, next_p->kmap_pte[i]);
- + }
- +}
- +#endif
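
These highmem changes are the counterpart of the switch_kmaps() hook added to switch_to() earlier in this series: since kmap_atomic() now uses preempt_disable_nort(), which is a no-op on PREEMPT_RT, a task can be preempted while holding atomic kmaps. Each active mapping is therefore mirrored in current->kmap_pte[], and switch_kmaps() tears down the outgoing task's fixmap entries and re-establishes the incoming task's on every context switch.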
- diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
- index c2366510187a..6b60f582b738 100644
- --- a/arch/arm/plat-versatile/platsmp.c
- +++ b/arch/arm/plat-versatile/platsmp.c
- @@ -32,7 +32,7 @@ static void write_pen_release(int val)
- sync_cache_w(&pen_release);
- }
-
- -static DEFINE_SPINLOCK(boot_lock);
- +static DEFINE_RAW_SPINLOCK(boot_lock);
-
- void versatile_secondary_init(unsigned int cpu)
- {
- @@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu)
- /*
- * Synchronise with the boot thread.
- */
- - spin_lock(&boot_lock);
- - spin_unlock(&boot_lock);
- + raw_spin_lock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
- }
-
- int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
- @@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- - spin_lock(&boot_lock);
- + raw_spin_lock(&boot_lock);
-
- /*
- * This is really belt and braces; we hold unintended secondary
- @@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- - spin_unlock(&boot_lock);
- + raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
- diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
- index cf57a7799a0f..78d1b49fbed5 100644
- --- a/arch/arm64/Kconfig
- +++ b/arch/arm64/Kconfig
- @@ -91,6 +91,7 @@ config ARM64
- select HAVE_PERF_EVENTS
- select HAVE_PERF_REGS
- select HAVE_PERF_USER_STACK_DUMP
- + select HAVE_PREEMPT_LAZY
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RCU_TABLE_FREE
- select HAVE_SYSCALL_TRACEPOINTS
- @@ -704,7 +705,7 @@ config XEN_DOM0
-
- config XEN
- bool "Xen guest support on ARM64"
- - depends on ARM64 && OF
- + depends on ARM64 && OF && !PREEMPT_RT_FULL
- select SWIOTLB_XEN
- select PARAVIRT
- help
- diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
- index e9ea5a6bd449..6c500ad63c6a 100644
- --- a/arch/arm64/include/asm/thread_info.h
- +++ b/arch/arm64/include/asm/thread_info.h
- @@ -49,6 +49,7 @@ struct thread_info {
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
- int preempt_count; /* 0 => preemptable, <0 => bug */
- + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
- };
-
- @@ -112,6 +113,7 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_NEED_RESCHED 1
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
- #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
- +#define TIF_NEED_RESCHED_LAZY 4
- #define TIF_NOHZ 7
- #define TIF_SYSCALL_TRACE 8
- #define TIF_SYSCALL_AUDIT 9
- @@ -127,6 +129,7 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
- #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
- #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
- +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_NOHZ (1 << TIF_NOHZ)
- #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- @@ -135,7 +138,9 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_32BIT (1 << TIF_32BIT)
-
- #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- - _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
- + _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- + _TIF_NEED_RESCHED_LAZY)
- +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-
- #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
- diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
- index c58ddf8c4062..a8f2f7c1fe12 100644
- --- a/arch/arm64/kernel/asm-offsets.c
- +++ b/arch/arm64/kernel/asm-offsets.c
- @@ -38,6 +38,7 @@ int main(void)
- BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
- diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
- index b4c7db434654..433d846f4f51 100644
- --- a/arch/arm64/kernel/entry.S
- +++ b/arch/arm64/kernel/entry.S
- @@ -430,11 +430,16 @@ el1_irq:
-
- #ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- - cbnz w24, 1f // preempt count != 0
- + cbnz w24, 2f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
- - tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
- - bl el1_preempt
- + tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
- +
- + ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
- + cbnz w24, 2f // preempt lazy count != 0
- + tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
- 1:
- + bl el1_preempt
- +2:
- #endif
- #ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
- @@ -448,6 +453,7 @@ el1_preempt:
- 1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
- + tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
- ret x24
- #endif
-
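The reworked el1_irq check above preempts immediately on a hard request, but honours a lazy request only when both the preempt count and the lazy preempt count are zero. A compilable sketch of the same decision, with illustrative struct and field names (the real state lives in struct thread_info):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct thread_info. */
    struct ti {
        int preempt_count;
        int preempt_lazy_count;
        bool need_resched;
        bool need_resched_lazy;
    };

    static bool should_preempt(const struct ti *t)
    {
        if (t->preempt_count)
            return false;                /* cbnz w24, 2f */
        if (t->need_resched)
            return true;                 /* tbnz x0, #TIF_NEED_RESCHED, 1f */
        if (t->preempt_lazy_count)
            return false;                /* cbnz w24, 2f (lazy count) */
        return t->need_resched_lazy;     /* tbz x0, #TIF_NEED_RESCHED_LAZY, 2f */
    }

    int main(void)
    {
        struct ti t = { 0, 1, false, true };

        /* A lazy request is deferred while a lazy section is held. */
        printf("preempt: %d\n", should_preempt(&t));   /* prints 0 */
        return 0;
    }

Deferring only the lazy request is the point of the scheme: throughput tasks keep their current timeslice across short lazy-disabled sections, while RT tasks still preempt at the first opportunity.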
- diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
- index 404dd67080b9..639dc6d12e72 100644
- --- a/arch/arm64/kernel/signal.c
- +++ b/arch/arm64/kernel/signal.c
- @@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
- */
- trace_hardirqs_off();
- do {
- - if (thread_flags & _TIF_NEED_RESCHED) {
- + if (thread_flags & _TIF_NEED_RESCHED_MASK) {
- schedule();
- } else {
- local_irq_enable();
- diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
- index 5e844f68e847..dc613cc10f54 100644
- --- a/arch/mips/Kconfig
- +++ b/arch/mips/Kconfig
- @@ -2516,7 +2516,7 @@ config MIPS_ASID_BITS_VARIABLE
- #
- config HIGHMEM
- bool "High Memory Support"
- - depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
- + depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
-
- config CPU_SUPPORTS_HIGHMEM
- bool
- diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
- index 6eda5abbd719..601e27701a4a 100644
- --- a/arch/powerpc/Kconfig
- +++ b/arch/powerpc/Kconfig
- @@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT
-
- config RWSEM_GENERIC_SPINLOCK
- bool
- + default y if PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
- bool
- - default y
- + default y if !PREEMPT_RT_FULL
-
- config GENERIC_LOCKBREAK
- bool
- @@ -134,6 +135,7 @@ config PPC
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
- + select HAVE_PREEMPT_LAZY
- select HAVE_MOD_ARCH_SPECIFIC
- select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
- @@ -321,7 +323,7 @@ menu "Kernel options"
-
- config HIGHMEM
- bool "High memory support"
- - depends on PPC32
- + depends on PPC32 && !PREEMPT_RT_FULL
-
- source kernel/Kconfig.hz
- source kernel/Kconfig.preempt
- diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
- index 87e4b2d8dcd4..981e501a4359 100644
- --- a/arch/powerpc/include/asm/thread_info.h
- +++ b/arch/powerpc/include/asm/thread_info.h
- @@ -43,6 +43,8 @@ struct thread_info {
- int cpu; /* cpu we're on */
- int preempt_count; /* 0 => preemptable,
- <0 => BUG */
- + int preempt_lazy_count; /* 0 => preemptable,
- + <0 => BUG */
- unsigned long local_flags; /* private flags for thread */
- #ifdef CONFIG_LIVEPATCH
- unsigned long *livepatch_sp;
- @@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void)
- #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
- #define TIF_SIGPENDING 1 /* signal pending */
- #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
- -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
- - TIF_NEED_RESCHED */
- +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
- #define TIF_32BIT 4 /* 32 bit binary */
- #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- @@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void)
- #if defined(CONFIG_PPC64)
- #define TIF_ELF2ABI 18 /* function descriptors must die! */
- #endif
- +#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
- + TIF_NEED_RESCHED */
-
- /* as above, but as bit values */
- #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
- @@ -125,14 +128,16 @@ static inline struct thread_info *current_thread_info(void)
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
- #define _TIF_NOHZ (1<<TIF_NOHZ)
- +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
- #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ)
-
- #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- - _TIF_RESTORE_TM)
- + _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
- #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
- +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-
- /* Bits in local_flags */
- /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
- diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
- index c833d88c423d..96e9fbc3f684 100644
- --- a/arch/powerpc/kernel/asm-offsets.c
- +++ b/arch/powerpc/kernel/asm-offsets.c
- @@ -156,6 +156,7 @@ int main(void)
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-
- diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
- index 3841d749a430..6dbaeff192b9 100644
- --- a/arch/powerpc/kernel/entry_32.S
- +++ b/arch/powerpc/kernel/entry_32.S
- @@ -835,7 +835,14 @@ resume_kernel:
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
- andi. r8,r8,_TIF_NEED_RESCHED
- + bne+ 1f
- + lwz r0,TI_PREEMPT_LAZY(r9)
- + cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- + bne restore
- + lwz r0,TI_FLAGS(r9)
- + andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
- +1:
- lwz r3,_MSR(r1)
- andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
- @@ -846,11 +853,11 @@ resume_kernel:
- */
- bl trace_hardirqs_off
- #endif
- -1: bl preempt_schedule_irq
- +2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
- - andi. r0,r3,_TIF_NEED_RESCHED
- - bne- 1b
- + andi. r0,r3,_TIF_NEED_RESCHED_MASK
- + bne- 2b
- #ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
- @@ -1171,7 +1178,7 @@ global_dbcr0:
- #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-
- do_work: /* r10 contains MSR_KERNEL here */
- - andi. r0,r9,_TIF_NEED_RESCHED
- + andi. r0,r9,_TIF_NEED_RESCHED_MASK
- beq do_user_signal
-
- do_resched: /* r10 contains MSR_KERNEL here */
- @@ -1192,7 +1199,7 @@ recheck:
- MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
- - andi. r0,r9,_TIF_NEED_RESCHED
- + andi. r0,r9,_TIF_NEED_RESCHED_MASK
- bne- do_resched
- andi. r0,r9,_TIF_USER_WORK_MASK
- beq restore_user
- diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
- index caa659671599..891080c4a41e 100644
- --- a/arch/powerpc/kernel/entry_64.S
- +++ b/arch/powerpc/kernel/entry_64.S
- @@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite)
- bl restore_math
- b restore
- #endif
- -1: andi. r0,r4,_TIF_NEED_RESCHED
- +1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
- @@ -718,10 +718,18 @@ resume_kernel:
-
- #ifdef CONFIG_PREEMPT
- /* Check if we need to preempt */
- + lwz r8,TI_PREEMPT(r9)
- + cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
- + bne restore
- andi. r0,r4,_TIF_NEED_RESCHED
- + bne+ check_count
- +
- + andi. r0,r4,_TIF_NEED_RESCHED_LAZY
- beq+ restore
- + lwz r8,TI_PREEMPT_LAZY(r9)
- +
- /* Check that preempt_count() == 0 and interrupts are enabled */
- - lwz r8,TI_PREEMPT(r9)
- +check_count:
- cmpwi cr1,r8,0
- ld r0,SOFTE(r1)
- cmpdi r0,0
- @@ -738,7 +746,7 @@ resume_kernel:
- /* Re-test flags and eventually loop */
- CURRENT_THREAD_INFO(r9, r1)
- ld r4,TI_FLAGS(r9)
- - andi. r0,r4,_TIF_NEED_RESCHED
- + andi. r0,r4,_TIF_NEED_RESCHED_MASK
- bne 1b
-
- /*
- diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
- index 028a22bfa90c..a75e2dd3e71f 100644
- --- a/arch/powerpc/kernel/irq.c
- +++ b/arch/powerpc/kernel/irq.c
- @@ -651,6 +651,7 @@ void irq_ctx_init(void)
- }
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curtp, *irqtp;
- @@ -668,6 +669,7 @@ void do_softirq_own_stack(void)
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
- }
- +#endif
-
- irq_hw_number_t virq_to_hw(unsigned int virq)
- {
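With PREEMPT_RT_FULL, softirqs are processed in thread context rather than on a dedicated IRQ stack, so do_softirq_own_stack() loses its only caller; the powerpc copy above and the sh, sparc and x86 copies below are all fenced off the same way. A minimal model of that build-time split, with the config symbol spelled as a plain macro:

    #include <stdio.h>

    /* #define CONFIG_PREEMPT_RT_FULL */  /* define to emulate an RT build */

    #ifndef CONFIG_PREEMPT_RT_FULL
    static void do_softirq_own_stack(void)
    {
        printf("softirq on the dedicated IRQ stack\n");
    }
    #endif

    int main(void)
    {
    #ifndef CONFIG_PREEMPT_RT_FULL
        do_softirq_own_stack();
    #else
        printf("softirq handled in thread context instead\n");
    #endif
        return 0;
    }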
- diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
- index 030d72df5dd5..b471a709e100 100644
- --- a/arch/powerpc/kernel/misc_32.S
- +++ b/arch/powerpc/kernel/misc_32.S
- @@ -41,6 +41,7 @@
- * We store the saved ksp_limit in the unused part
- * of the STACK_FRAME_OVERHEAD
- */
- +#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
- mflr r0
- stw r0,4(r1)
- @@ -57,6 +58,7 @@ _GLOBAL(call_do_softirq)
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
- +#endif
-
- /*
- * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
- diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
- index 4cefe6888b18..cb2ee4be999a 100644
- --- a/arch/powerpc/kernel/misc_64.S
- +++ b/arch/powerpc/kernel/misc_64.S
- @@ -31,6 +31,7 @@
-
- .text
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- _GLOBAL(call_do_softirq)
- mflr r0
- std r0,16(r1)
- @@ -41,6 +42,7 @@ _GLOBAL(call_do_softirq)
- ld r0,16(r1)
- mtlr r0
- blr
- +#endif
-
- _GLOBAL(call_do_irq)
- mflr r0
- diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
- index 029be26b5a17..9528089ea142 100644
- --- a/arch/powerpc/kvm/Kconfig
- +++ b/arch/powerpc/kvm/Kconfig
- @@ -175,6 +175,7 @@ config KVM_E500MC
- config KVM_MPIC
- bool "KVM in-kernel MPIC emulation"
- depends on KVM && E500
- + depends on !PREEMPT_RT_FULL
- select HAVE_KVM_IRQCHIP
- select HAVE_KVM_IRQFD
- select HAVE_KVM_IRQ_ROUTING
- diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
- index e48462447ff0..2670cee66064 100644
- --- a/arch/powerpc/platforms/ps3/device-init.c
- +++ b/arch/powerpc/platforms/ps3/device-init.c
- @@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
- }
- pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
-
- - res = wait_event_interruptible(dev->done.wait,
- + res = swait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
- if (kthread_should_stop())
- res = -EINTR;
- diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
- index 6c0378c0b8b5..abd58b4dff97 100644
- --- a/arch/sh/kernel/irq.c
- +++ b/arch/sh/kernel/irq.c
- @@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
- hardirq_ctx[cpu] = NULL;
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct thread_info *curctx;
- @@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
- "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
- );
- }
- +#endif
- #else
- static inline void handle_one_irq(unsigned int irq)
- {
- diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
- index 8b4152f3a764..c5cca159692a 100644
- --- a/arch/sparc/Kconfig
- +++ b/arch/sparc/Kconfig
- @@ -194,12 +194,10 @@ config NR_CPUS
- source kernel/Kconfig.hz
-
- config RWSEM_GENERIC_SPINLOCK
- - bool
- - default y if SPARC32
- + def_bool PREEMPT_RT_FULL
-
- config RWSEM_XCHGADD_ALGORITHM
- - bool
- - default y if SPARC64
- + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_HWEIGHT
- bool
- diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
- index 5cbf03c14981..6067d9379e5b 100644
- --- a/arch/sparc/kernel/irq_64.c
- +++ b/arch/sparc/kernel/irq_64.c
- @@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
- set_irq_regs(old_regs);
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- void *orig_sp, *sp = softirq_stack[smp_processor_id()];
- @@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
- __asm__ __volatile__("mov %0, %%sp"
- : : "r" (orig_sp));
- }
- +#endif
-
- #ifdef CONFIG_HOTPLUG_CPU
- void fixup_irqs(void)
- diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
- index da8156fd3d58..d8cd3bc807fc 100644
- --- a/arch/x86/Kconfig
- +++ b/arch/x86/Kconfig
- @@ -17,6 +17,7 @@ config X86_64
- ### Arch settings
- config X86
- def_bool y
- + select HAVE_PREEMPT_LAZY
- select ACPI_LEGACY_TABLES_LOOKUP if ACPI
- select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
- select ANON_INODES
- @@ -232,8 +233,11 @@ config ARCH_MAY_HAVE_PC_FDC
- def_bool y
- depends on ISA_DMA_API
-
- +config RWSEM_GENERIC_SPINLOCK
- + def_bool PREEMPT_RT_FULL
- +
- config RWSEM_XCHGADD_ALGORITHM
- - def_bool y
- + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
-
- config GENERIC_CALIBRATE_DELAY
- def_bool y
- @@ -897,7 +901,7 @@ config IOMMU_HELPER
- config MAXSMP
- bool "Enable Maximum number of SMP Processors and NUMA Nodes"
- depends on X86_64 && SMP && DEBUG_KERNEL
- - select CPUMASK_OFFSTACK
- + select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
- ---help---
- Enable maximum number of CPUS and NUMA Nodes for this architecture.
- If unsure, say N.
- diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
- index aa8b0672f87a..2429414bfc71 100644
- --- a/arch/x86/crypto/aesni-intel_glue.c
- +++ b/arch/x86/crypto/aesni-intel_glue.c
- @@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- - kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
- + kernel_fpu_begin();
- aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- - nbytes & AES_BLOCK_MASK);
- + nbytes & AES_BLOCK_MASK);
- + kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- - kernel_fpu_end();
-
- return err;
- }
- @@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- - kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
- + kernel_fpu_begin();
- aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK);
- + kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- - kernel_fpu_end();
-
- return err;
- }
- @@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- - kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
- + kernel_fpu_begin();
- aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
- + kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- - kernel_fpu_end();
-
- return err;
- }
- @@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- - kernel_fpu_begin();
- while ((nbytes = walk.nbytes)) {
- + kernel_fpu_begin();
- aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
- + kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- - kernel_fpu_end();
-
- return err;
- }
- @@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- - kernel_fpu_begin();
- while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
- + kernel_fpu_begin();
- aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes & AES_BLOCK_MASK, walk.iv);
- + kernel_fpu_end();
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- if (walk.nbytes) {
- + kernel_fpu_begin();
- ctr_crypt_final(ctx, &walk);
- + kernel_fpu_end();
- err = blkcipher_walk_done(desc, &walk, 0);
- }
- - kernel_fpu_end();
-
- return err;
- }
- diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
- index 8648158f3916..d7699130ee36 100644
- --- a/arch/x86/crypto/cast5_avx_glue.c
- +++ b/arch/x86/crypto/cast5_avx_glue.c
- @@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled)
- static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- bool enc)
- {
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes;
- @@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
-
- - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- + fpu_enabled = cast5_fpu_begin(false, nbytes);
-
- /* Process multi-block batch */
- if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
- @@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- } while (nbytes >= bsize);
-
- done:
- + cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, walk, nbytes);
- }
- -
- - cast5_fpu_end(fpu_enabled);
- return err;
- }
-
- @@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
- {
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
- @@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- while ((nbytes = walk.nbytes)) {
- - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- + fpu_enabled = cast5_fpu_begin(false, nbytes);
- nbytes = __cbc_decrypt(desc, &walk);
- + cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
- -
- - cast5_fpu_end(fpu_enabled);
- return err;
- }
-
- @@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
- {
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
- @@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
- while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- + fpu_enabled = cast5_fpu_begin(false, nbytes);
- nbytes = __ctr_crypt(desc, &walk);
- + cast5_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- - cast5_fpu_end(fpu_enabled);
- -
- if (walk.nbytes) {
- ctr_crypt_final(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
- diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
- index 6a85598931b5..3a506ce7ed93 100644
- --- a/arch/x86/crypto/glue_helper.c
- +++ b/arch/x86/crypto/glue_helper.c
- @@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- const unsigned int bsize = 128 / 8;
- unsigned int nbytes, i, func_bytes;
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- int err;
-
- err = blkcipher_walk_virt(desc, walk);
- @@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- u8 *wdst = walk->dst.virt.addr;
-
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- - desc, fpu_enabled, nbytes);
- + desc, false, nbytes);
-
- for (i = 0; i < gctx->num_funcs; i++) {
- func_bytes = bsize * gctx->funcs[i].num_blocks;
- @@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- }
-
- done:
- + glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, walk, nbytes);
- }
-
- - glue_fpu_end(fpu_enabled);
- return err;
- }
-
- @@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct scatterlist *src, unsigned int nbytes)
- {
- const unsigned int bsize = 128 / 8;
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
- @@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
-
- while ((nbytes = walk.nbytes)) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- - desc, fpu_enabled, nbytes);
- + desc, false, nbytes);
- nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
- + glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- - glue_fpu_end(fpu_enabled);
- return err;
- }
- EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
- @@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct scatterlist *src, unsigned int nbytes)
- {
- const unsigned int bsize = 128 / 8;
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
- @@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
-
- while ((nbytes = walk.nbytes) >= bsize) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- - desc, fpu_enabled, nbytes);
- + desc, false, nbytes);
- nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
- + glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- - glue_fpu_end(fpu_enabled);
- -
- if (walk.nbytes) {
- glue_ctr_crypt_final_128bit(
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
- @@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- void *tweak_ctx, void *crypt_ctx)
- {
- const unsigned int bsize = 128 / 8;
- - bool fpu_enabled = false;
- + bool fpu_enabled;
- struct blkcipher_walk walk;
- int err;
-
- @@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
-
- /* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- - desc, fpu_enabled,
- + desc, false,
- nbytes < bsize ? bsize : nbytes);
- -
- /* calculate first value of T */
- tweak_fn(tweak_ctx, walk.iv, walk.iv);
- + glue_fpu_end(fpu_enabled);
-
- while (nbytes) {
- + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- + desc, false, nbytes);
- nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
-
- + glue_fpu_end(fpu_enabled);
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- }
- -
- - glue_fpu_end(fpu_enabled);
- -
- return err;
- }
- EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
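The aesni, cast5 and glue_helper changes above all apply one pattern: kernel_fpu_begin() disables preemption until kernel_fpu_end(), so wrapping an entire blkcipher walk in one begin/end pair creates a preempt-off region that grows with the request size. Re-entering the FPU section once per chunk bounds that latency. A toy model of the rescoping, with a plain flag standing in for the preempt-off state:

    #include <stdio.h>

    static int fpu_busy;                 /* models "preemption disabled" */

    static void fpu_begin(void) { fpu_busy = 1; }
    static void fpu_end(void)   { fpu_busy = 0; }

    static void crypt_chunk(int n)
    {
        printf("chunk %d, fpu=%d\n", n, fpu_busy);
    }

    int main(void)
    {
        int chunk;

        for (chunk = 0; chunk < 3; chunk++) {
            fpu_begin();                 /* was: once before the loop */
            crypt_chunk(chunk);
            fpu_end();                   /* was: once after the loop */
        }
        return 0;
    }

This is also why the cast5 and glue hunks drop the fpu_enabled accumulator and pass false on every iteration: the FPU section no longer spans loop iterations, so there is no state to carry.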
- diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
- index bdd9cc59d20f..56d01a339ba4 100644
- --- a/arch/x86/entry/common.c
- +++ b/arch/x86/entry/common.c
- @@ -129,7 +129,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
-
- #define EXIT_TO_USERMODE_LOOP_FLAGS \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- - _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)
- + _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY)
-
- static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
- {
- @@ -145,9 +145,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
- /* We have work to do. */
- local_irq_enable();
-
- - if (cached_flags & _TIF_NEED_RESCHED)
- + if (cached_flags & _TIF_NEED_RESCHED_MASK)
- schedule();
-
- +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- + if (unlikely(current->forced_info.si_signo)) {
- + struct task_struct *t = current;
- + force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
- + t->forced_info.si_signo = 0;
- + }
- +#endif
- if (cached_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
- diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
- index edba8606b99a..4a3389535fc6 100644
- --- a/arch/x86/entry/entry_32.S
- +++ b/arch/x86/entry/entry_32.S
- @@ -308,8 +308,25 @@ END(ret_from_exception)
- ENTRY(resume_kernel)
- DISABLE_INTERRUPTS(CLBR_ANY)
- need_resched:
- + # preempt count == 0 + NEED_RESCHED set?
- cmpl $0, PER_CPU_VAR(__preempt_count)
- +#ifndef CONFIG_PREEMPT_LAZY
- jnz restore_all
- +#else
- + jz test_int_off
- +
- + # at least preempt count == 0 ?
- + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
- + jne restore_all
- +
- + movl PER_CPU_VAR(current_task), %ebp
- + cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
- + jnz restore_all
- +
- + testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
- + jz restore_all
- +test_int_off:
- +#endif
- testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
- call preempt_schedule_irq
- diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
- index af4e58132d91..22803e2f7495 100644
- --- a/arch/x86/entry/entry_64.S
- +++ b/arch/x86/entry/entry_64.S
- @@ -575,7 +575,23 @@ retint_kernel:
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
- jnc 1f
- 0: cmpl $0, PER_CPU_VAR(__preempt_count)
- +#ifndef CONFIG_PREEMPT_LAZY
- jnz 1f
- +#else
- + jz do_preempt_schedule_irq
- +
- + # at least preempt count == 0 ?
- + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
- + jnz 1f
- +
- + movq PER_CPU_VAR(current_task), %rcx
- + cmpl $0, TASK_TI_preempt_lazy_count(%rcx)
- + jnz 1f
- +
- + bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx)
- + jnc 1f
- +do_preempt_schedule_irq:
- +#endif
- call preempt_schedule_irq
- jmp 0b
- 1:
- @@ -925,6 +941,7 @@ bad_gs:
- jmp 2b
- .previous
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /* Call softirq on interrupt stack. Interrupts are off. */
- ENTRY(do_softirq_own_stack)
- pushq %rbp
- @@ -937,6 +954,7 @@ ENTRY(do_softirq_own_stack)
- decl PER_CPU_VAR(irq_count)
- ret
- END(do_softirq_own_stack)
- +#endif
-
- #ifdef CONFIG_XEN
- idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
- diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
- index 17f218645701..11bd1b7ee6eb 100644
- --- a/arch/x86/include/asm/preempt.h
- +++ b/arch/x86/include/asm/preempt.h
- @@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val)
- * a decrement which hits zero means we have no preempt_count and should
- * reschedule.
- */
- -static __always_inline bool __preempt_count_dec_and_test(void)
- +static __always_inline bool ____preempt_count_dec_and_test(void)
- {
- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
- }
-
- +static __always_inline bool __preempt_count_dec_and_test(void)
- +{
- + if (____preempt_count_dec_and_test())
- + return true;
- +#ifdef CONFIG_PREEMPT_LAZY
- + if (current_thread_info()->preempt_lazy_count)
- + return false;
- + return test_thread_flag(TIF_NEED_RESCHED_LAZY);
- +#else
- + return false;
- +#endif
- +}
- +
- /*
- * Returns true when we need to resched and can (barring IRQ state).
- */
- static __always_inline bool should_resched(int preempt_offset)
- {
- +#ifdef CONFIG_PREEMPT_LAZY
- + u32 tmp;
- +
- + tmp = raw_cpu_read_4(__preempt_count);
- + if (tmp == preempt_offset)
- + return true;
- +
- + /* preempt count == 0 ? */
- + tmp &= ~PREEMPT_NEED_RESCHED;
- + if (tmp)
- + return false;
- + if (current_thread_info()->preempt_lazy_count)
- + return false;
- + return test_thread_flag(TIF_NEED_RESCHED_LAZY);
- +#else
- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
- +#endif
- }
-
- #ifdef CONFIG_PREEMPT
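should_resched() above reproduces the lazy rules in C. x86 folds NEED_RESCHED into __preempt_count as an inverted bit, so equality with preempt_offset means "count at the expected level and a hard reschedule pending"; only when no hard request exists does the lazy state get consulted. A compilable model of that branch, with globals standing in for the per-CPU count and thread-info fields:

    #include <stdbool.h>
    #include <stdio.h>

    /* On x86 the NEED_RESCHED state is folded into the preempt count as
     * an *inverted* bit: the bit is set while no reschedule is needed. */
    #define PREEMPT_NEED_RESCHED 0x80000000u

    static unsigned int preempt_count_raw;  /* stand-in for __preempt_count */
    static int preempt_lazy_count;          /* thread_info field */
    static bool tif_need_resched_lazy;      /* TIF_NEED_RESCHED_LAZY */

    static bool should_resched(unsigned int preempt_offset)
    {
        unsigned int tmp = preempt_count_raw;

        if (tmp == preempt_offset)
            return true;                /* count at offset, hard request set */

        tmp &= ~PREEMPT_NEED_RESCHED;   /* ignore the folded bit */
        if (tmp)
            return false;               /* still inside a preempt-off region */
        if (preempt_lazy_count)
            return false;               /* inside a lazy-off region */
        return tif_need_resched_lazy;   /* honour a lazy request */
    }

    int main(void)
    {
        preempt_count_raw = PREEMPT_NEED_RESCHED; /* count 0, no hard request */
        tif_need_resched_lazy = true;
        printf("resched: %d\n", should_resched(0)); /* prints 1 */
        return 0;
    }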
- diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
- index 8af22be0fe61..d1328789b759 100644
- --- a/arch/x86/include/asm/signal.h
- +++ b/arch/x86/include/asm/signal.h
- @@ -27,6 +27,19 @@ typedef struct {
- #define SA_IA32_ABI 0x02000000u
- #define SA_X32_ABI 0x01000000u
-
- +/*
- + * Because some traps use the IST stack, we must keep preemption
- + * disabled while calling do_trap(), but do_trap() may call
- + * force_sig_info() which will grab the signal spin_locks for the
- + * task, which in PREEMPT_RT_FULL are mutexes. By defining
- + * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
- + * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
- + * trap.
- + */
- +#if defined(CONFIG_PREEMPT_RT_FULL)
- +#define ARCH_RT_DELAYS_SIGNAL_SEND
- +#endif
- +
- #ifndef CONFIG_COMPAT
- typedef sigset_t compat_sigset_t;
- #endif
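The comment block above describes the mechanism; the consumer is the exit_to_usermode_loop() hunk earlier, which delivers the recorded signal once it is safe to take sleeping locks. A minimal record-then-deliver sketch (the struct and field names here are illustrative, not the kernel's, which records a full siginfo):

    #include <stdio.h>

    struct task {
        int forced_signo;   /* models current->forced_info.si_signo */
    };

    /* Called from atomic context: record only, take no locks. */
    static void force_sig_in_atomic(struct task *t, int signo)
    {
        t->forced_signo = signo;
    }

    /* Called on the way back to user space, sleeping locks allowed. */
    static void exit_to_usermode(struct task *t)
    {
        if (t->forced_signo) {
            printf("delivering signal %d\n", t->forced_signo);
            t->forced_signo = 0;
        }
    }

    int main(void)
    {
        struct task t = { 0 };

        force_sig_in_atomic(&t, 11);   /* e.g. a fault raised from a trap */
        exit_to_usermode(&t);
        return 0;
    }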
- diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
- index 58505f01962f..02fa39652cd6 100644
- --- a/arch/x86/include/asm/stackprotector.h
- +++ b/arch/x86/include/asm/stackprotector.h
- @@ -59,7 +59,7 @@
- */
- static __always_inline void boot_init_stack_canary(void)
- {
- - u64 canary;
- + u64 uninitialized_var(canary);
- u64 tsc;
-
- #ifdef CONFIG_X86_64
- @@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void)
- * of randomness. The TSC only matters for very early init,
- * there it already has some randomness on most systems. Later
- * on during the bootup the random pool has true entropy too.
- + *
- + * For preempt-rt we need to weaken the randomness a bit, as
- + * we can't call into the random generator from atomic context
- + * due to locking constraints. We just leave canary
- + * uninitialized and use the TSC based randomness on top of it.
- */
- +#ifndef CONFIG_PREEMPT_RT_FULL
- get_random_bytes(&canary, sizeof(canary));
- +#endif
- tsc = rdtsc();
- canary += tsc + (tsc << 32UL);
-
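So on an RT kernel the boot-time canary is seeded from the TSC alone rather than the entropy pool, since get_random_bytes() may take sleeping locks that are illegal in this early atomic context. The mixing step can be modelled in userspace, with clock_gettime() standing in for rdtsc():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        uint64_t canary = 0;   /* get_random_bytes() contribution skipped */
        struct timespec ts;
        uint64_t tsc;

        clock_gettime(CLOCK_MONOTONIC, &ts);        /* stand-in for rdtsc() */
        tsc = (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;

        canary += tsc + (tsc << 32);                /* same mixing step */
        printf("canary: %#llx\n", (unsigned long long)canary);
        return 0;
    }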
- diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
- index ad6f5eb07a95..5ceb3a1c2b1a 100644
- --- a/arch/x86/include/asm/thread_info.h
- +++ b/arch/x86/include/asm/thread_info.h
- @@ -54,11 +54,14 @@ struct task_struct;
-
- struct thread_info {
- unsigned long flags; /* low level flags */
- + int preempt_lazy_count; /* 0 => lazy preemptable
- + <0 => BUG */
- };
-
- #define INIT_THREAD_INFO(tsk) \
- { \
- .flags = 0, \
- + .preempt_lazy_count = 0, \
- }
-
- #define init_stack (init_thread_union.stack)
- @@ -67,6 +70,10 @@ struct thread_info {
-
- #include <asm/asm-offsets.h>
-
- +#define GET_THREAD_INFO(reg) \
- + _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
- + _ASM_SUB $(THREAD_SIZE),reg ;
- +
- #endif
-
- /*
- @@ -85,6 +92,7 @@ struct thread_info {
- #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
- #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
- #define TIF_SECCOMP 8 /* secure computing */
- +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
- #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
- #define TIF_UPROBE 12 /* breakpointed or singlestepping */
- #define TIF_NOTSC 16 /* TSC is not accessible in userland */
- @@ -108,6 +116,7 @@ struct thread_info {
- #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
- #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
- #define _TIF_SECCOMP (1 << TIF_SECCOMP)
- +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
- #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
- #define _TIF_UPROBE (1 << TIF_UPROBE)
- #define _TIF_NOTSC (1 << TIF_NOTSC)
- @@ -143,6 +152,8 @@ struct thread_info {
- #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
- #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
-
- +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
- +
- #define STACK_WARN (THREAD_SIZE/8)
-
- /*
- diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
- index 57ab86d94d64..35d25e27180f 100644
- --- a/arch/x86/include/asm/uv/uv_bau.h
- +++ b/arch/x86/include/asm/uv/uv_bau.h
- @@ -624,9 +624,9 @@ struct bau_control {
- cycles_t send_message;
- cycles_t period_end;
- cycles_t period_time;
- - spinlock_t uvhub_lock;
- - spinlock_t queue_lock;
- - spinlock_t disable_lock;
- + raw_spinlock_t uvhub_lock;
- + raw_spinlock_t queue_lock;
- + raw_spinlock_t disable_lock;
- /* tunables */
- int max_concurr;
- int max_concurr_const;
- @@ -815,15 +815,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
- * to be lowered below the current 'v'. atomic_add_unless can only stop
- * on equal.
- */
- -static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
- +static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
- {
- - spin_lock(lock);
- + raw_spin_lock(lock);
- if (atomic_read(v) >= u) {
- - spin_unlock(lock);
- + raw_spin_unlock(lock);
- return 0;
- }
- atomic_inc(v);
- - spin_unlock(lock);
- + raw_spin_unlock(lock);
- return 1;
- }
-
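The three BAU locks become raw_spinlock_t because they are taken from contexts that stay atomic even on RT (the TLB-shootdown and IPI paths in tlb_uv.c below), where a spinlock_t, which turns into a sleeping lock on RT, would be illegal. atomic_inc_unless_ge() keeps the same shape; a userspace model with a pthread spinlock in the raw lock's role:

    #define _XOPEN_SOURCE 600
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_spinlock_t lock;    /* in the raw_spinlock_t role */

    /* Mirrors atomic_inc_unless_ge() from the hunk above. */
    static int inc_unless_ge(atomic_int *v, int u)
    {
        int ret = 0;

        pthread_spin_lock(&lock);
        if (atomic_load(v) < u) {      /* only advance below the limit */
            atomic_fetch_add(v, 1);
            ret = 1;
        }
        pthread_spin_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        atomic_int v = 2;
        int a, b;

        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        a = inc_unless_ge(&v, 3);      /* 2 < 3: succeeds */
        b = inc_unless_ge(&v, 3);      /* 3 >= 3: refused */
        printf("%d %d\n", a, b);       /* prints "1 0" */
        pthread_spin_destroy(&lock);
        return 0;
    }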
- diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
- index 11cc600f4df0..8cbfc51ce339 100644
- --- a/arch/x86/kernel/acpi/boot.c
- +++ b/arch/x86/kernel/acpi/boot.c
- @@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
- * ->ioapic_mutex
- * ->ioapic_lock
- */
- +#ifdef CONFIG_X86_IO_APIC
- static DEFINE_MUTEX(acpi_ioapic_lock);
- +#endif
-
- /* --------------------------------------------------------------------------
- Boot-time Configuration
- diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
- index cf89928dbd46..18b5ec2a71df 100644
- --- a/arch/x86/kernel/apic/io_apic.c
- +++ b/arch/x86/kernel/apic/io_apic.c
- @@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
- static inline bool ioapic_irqd_mask(struct irq_data *data)
- {
- /* If we are moving the irq we need to mask it */
- - if (unlikely(irqd_is_setaffinity_pending(data))) {
- + if (unlikely(irqd_is_setaffinity_pending(data) &&
- + !irqd_irq_inprogress(data))) {
- mask_ioapic_irq(data);
- return true;
- }
- diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
- index c62e015b126c..0cc71257fca6 100644
- --- a/arch/x86/kernel/asm-offsets.c
- +++ b/arch/x86/kernel/asm-offsets.c
- @@ -36,6 +36,7 @@ void common(void) {
-
- BLANK();
- OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
- + OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count);
- OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
-
- BLANK();
- @@ -91,4 +92,5 @@ void common(void) {
-
- BLANK();
- DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
- + DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
- }
- diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
- index 8ca5f8ad008e..edcbd18b3189 100644
- --- a/arch/x86/kernel/cpu/mcheck/mce.c
- +++ b/arch/x86/kernel/cpu/mcheck/mce.c
- @@ -41,6 +41,8 @@
- #include <linux/debugfs.h>
- #include <linux/irq_work.h>
- #include <linux/export.h>
- +#include <linux/jiffies.h>
- +#include <linux/swork.h>
- #include <linux/jump_label.h>
-
- #include <asm/processor.h>
- @@ -1306,7 +1308,7 @@ void mce_log_therm_throt_event(__u64 status)
- static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
-
- static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
- -static DEFINE_PER_CPU(struct timer_list, mce_timer);
- +static DEFINE_PER_CPU(struct hrtimer, mce_timer);
-
- static unsigned long mce_adjust_timer_default(unsigned long interval)
- {
- @@ -1315,32 +1317,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
-
- static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
-
- -static void __restart_timer(struct timer_list *t, unsigned long interval)
- +static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
- {
- - unsigned long when = jiffies + interval;
- - unsigned long flags;
- -
- - local_irq_save(flags);
- -
- - if (timer_pending(t)) {
- - if (time_before(when, t->expires))
- - mod_timer(t, when);
- - } else {
- - t->expires = round_jiffies(when);
- - add_timer_on(t, smp_processor_id());
- - }
- -
- - local_irq_restore(flags);
- + if (!interval)
- + return HRTIMER_NORESTART;
- + hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
- + return HRTIMER_RESTART;
- }
-
- -static void mce_timer_fn(unsigned long data)
- +static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
- {
- - struct timer_list *t = this_cpu_ptr(&mce_timer);
- - int cpu = smp_processor_id();
- unsigned long iv;
-
- - WARN_ON(cpu != data);
- -
- iv = __this_cpu_read(mce_next_interval);
-
- if (mce_available(this_cpu_ptr(&cpu_info))) {
- @@ -1363,7 +1351,7 @@ static void mce_timer_fn(unsigned long data)
-
- done:
- __this_cpu_write(mce_next_interval, iv);
- - __restart_timer(t, iv);
- + return __restart_timer(timer, iv);
- }
-
- /*
- @@ -1371,7 +1359,7 @@ static void mce_timer_fn(unsigned long data)
- */
- void mce_timer_kick(unsigned long interval)
- {
- - struct timer_list *t = this_cpu_ptr(&mce_timer);
- + struct hrtimer *t = this_cpu_ptr(&mce_timer);
- unsigned long iv = __this_cpu_read(mce_next_interval);
-
- __restart_timer(t, interval);
- @@ -1386,7 +1374,7 @@ static void mce_timer_delete_all(void)
- int cpu;
-
- for_each_online_cpu(cpu)
- - del_timer_sync(&per_cpu(mce_timer, cpu));
- + hrtimer_cancel(&per_cpu(mce_timer, cpu));
- }
-
- static void mce_do_trigger(struct work_struct *work)
- @@ -1396,6 +1384,56 @@ static void mce_do_trigger(struct work_struct *work)
-
- static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
-
- +static void __mce_notify_work(struct swork_event *event)
- +{
- + /* Not more than two messages every minute */
- + static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
- +
- + /* wake processes polling /dev/mcelog */
- + wake_up_interruptible(&mce_chrdev_wait);
- +
- + /*
- + * There is no risk of missing notifications because
- + * work_pending is always cleared before the function is
- + * executed.
- + */
- + if (mce_helper[0] && !work_pending(&mce_trigger_work))
- + schedule_work(&mce_trigger_work);
- +
- + if (__ratelimit(&ratelimit))
- + pr_info(HW_ERR "Machine check events logged\n");
- +}
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static bool notify_work_ready __read_mostly;
- +static struct swork_event notify_work;
- +
- +static int mce_notify_work_init(void)
- +{
- + int err;
- +
- + err = swork_get();
- + if (err)
- + return err;
- +
- + INIT_SWORK(&notify_work, __mce_notify_work);

- + notify_work_ready = true;
- + return 0;
- +}
- +
- +static void mce_notify_work(void)
- +{
- + if (notify_work_ready)
- + swork_queue(&notify_work);
- +}
- +#else
- +static void mce_notify_work(void)
- +{
- + __mce_notify_work(NULL);
- +}
- +static inline int mce_notify_work_init(void) { return 0; }
- +#endif
- +
- /*
- * Notify the user(s) about new machine check events.
- * Can be called from interrupt context, but not from machine check/NMI
- @@ -1403,19 +1441,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
- */
- int mce_notify_irq(void)
- {
- - /* Not more than two messages every minute */
- - static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
- -
- if (test_and_clear_bit(0, &mce_need_notify)) {
- - /* wake processes polling /dev/mcelog */
- - wake_up_interruptible(&mce_chrdev_wait);
- -
- - if (mce_helper[0])
- - schedule_work(&mce_trigger_work);
- -
- - if (__ratelimit(&ratelimit))
- - pr_info(HW_ERR "Machine check events logged\n");
- -
- + mce_notify_work();
- return 1;
- }
- return 0;
- @@ -1721,7 +1748,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
- }
- }
-
- -static void mce_start_timer(unsigned int cpu, struct timer_list *t)
- +static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
- {
- unsigned long iv = check_interval * HZ;
-
- @@ -1730,16 +1757,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
-
- per_cpu(mce_next_interval, cpu) = iv;
-
- - t->expires = round_jiffies(jiffies + iv);
- - add_timer_on(t, cpu);
- + hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
- + 0, HRTIMER_MODE_REL_PINNED);
- }
-
- static void __mcheck_cpu_init_timer(void)
- {
- - struct timer_list *t = this_cpu_ptr(&mce_timer);
- + struct hrtimer *t = this_cpu_ptr(&mce_timer);
- unsigned int cpu = smp_processor_id();
-
- - setup_pinned_timer(t, mce_timer_fn, cpu);
- + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- + t->function = mce_timer_fn;
- mce_start_timer(cpu, t);
- }
-
- @@ -2464,6 +2492,8 @@ static void mce_disable_cpu(void *h)
- if (!mce_available(raw_cpu_ptr(&cpu_info)))
- return;
-
- + hrtimer_cancel(this_cpu_ptr(&mce_timer));
- +
- if (!(action & CPU_TASKS_FROZEN))
- cmci_clear();
-
- @@ -2486,6 +2516,7 @@ static void mce_reenable_cpu(void *h)
- if (b->init)
- wrmsrl(msr_ops.ctl(i), b->ctl);
- }
- + __mcheck_cpu_init_timer();
- }
-
- /* Get notified when a cpu comes on/off. Be hotplug friendly. */
- @@ -2493,7 +2524,6 @@ static int
- mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- {
- unsigned int cpu = (unsigned long)hcpu;
- - struct timer_list *t = &per_cpu(mce_timer, cpu);
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- @@ -2513,11 +2543,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
- - del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
- - mce_start_timer(cpu, t);
- break;
- }
-
- @@ -2556,6 +2584,10 @@ static __init int mcheck_init_device(void)
- goto err_out;
- }
-
- + err = mce_notify_work_init();
- + if (err)
- + goto err_out;
- +
- if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
- err = -ENOMEM;
- goto err_out;
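The MCE poll timer becomes an hrtimer so its expiry runs with bounded work in hard-irq context, and rearming is expressed as "push the expiry forward and return HRTIMER_RESTART" instead of mod_timer() arithmetic on jiffies. The restart logic in isolation, with nanoseconds as plain integers:

    #include <stdio.h>

    enum restart { NORESTART, RESTART };

    struct htimer {
        long long expires_ns;
    };

    /* Mirrors __restart_timer() in the hunk above. */
    static enum restart restart_timer(struct htimer *t, long long interval_ns)
    {
        if (!interval_ns)
            return NORESTART;          /* polling switched off */
        t->expires_ns += interval_ns;  /* hrtimer_forward_now() equivalent */
        return RESTART;
    }

    int main(void)
    {
        struct htimer t = { 1000 };
        enum restart r = restart_timer(&t, 500);

        printf("%d expires=%lld\n", r, t.expires_ns);  /* 1 expires=1500 */
        return 0;
    }

The user-space notification side moves to swork for the same reason: waking /dev/mcelog pollers takes sleeping locks on RT, so it is deferred to a worker instead of running from the notify path.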
- diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
- index 1f38d9a4d9de..053bf3b2ef39 100644
- --- a/arch/x86/kernel/irq_32.c
- +++ b/arch/x86/kernel/irq_32.c
- @@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- void do_softirq_own_stack(void)
- {
- struct irq_stack *irqstk;
- @@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
-
- call_on_stack(__do_softirq, isp);
- }
- +#endif
-
- bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
- {
- diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
- index bd7be8efdc4c..b3b0a7f7b1ca 100644
- --- a/arch/x86/kernel/process_32.c
- +++ b/arch/x86/kernel/process_32.c
- @@ -35,6 +35,7 @@
- #include <linux/uaccess.h>
- #include <linux/io.h>
- #include <linux/kdebug.h>
- +#include <linux/highmem.h>
-
- #include <asm/pgtable.h>
- #include <asm/ldt.h>
- @@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
- }
- EXPORT_SYMBOL_GPL(start_thread);
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
- +{
- + int i;
- +
- + /*
- + * Clear @prev's kmap_atomic mappings
- + */
- + for (i = 0; i < prev_p->kmap_idx; i++) {
- + int idx = i + KM_TYPE_NR * smp_processor_id();
- + pte_t *ptep = kmap_pte - idx;
- +
- + kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
- + }
- + /*
- + * Restore @next_p's kmap_atomic mappings
- + */
- + for (i = 0; i < next_p->kmap_idx; i++) {
- + int idx = i + KM_TYPE_NR * smp_processor_id();
- +
- + if (!pte_none(next_p->kmap_pte[i]))
- + set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
- + }
- +}
- +#else
- +static inline void
- +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
- +#endif
- +
-
- /*
- * switch_to(x,y) should switch tasks from x to y.
- @@ -271,6 +301,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
-
- + switch_kmaps(prev_p, next_p);
- +
- /*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
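Because kmap_atomic() on RT no longer disables preemption (see the highmem_32.c change below), a task can be switched out while holding atomic kmap slots; switch_kmaps() above tears down the previous task's slots and replays the next task's saved ptes. A userspace model with arrays standing in for the fixmap and the pte values:

    #include <stdio.h>

    #define KM_SLOTS 4

    /* Illustrative task struct; the kernel keeps kmap_pte[] per task. */
    struct task {
        int kmap_idx;                        /* slots in use */
        unsigned long kmap_pte[KM_SLOTS];
    };

    static unsigned long cpu_slots[KM_SLOTS];    /* the per-CPU fixmap */

    static void switch_kmaps(struct task *prev, struct task *next)
    {
        int i;

        for (i = 0; i < prev->kmap_idx; i++)
            cpu_slots[i] = 0;                      /* kpte_clear_flush() */
        for (i = 0; i < next->kmap_idx; i++)
            if (next->kmap_pte[i])
                cpu_slots[i] = next->kmap_pte[i];  /* set_pte() */
    }

    int main(void)
    {
        struct task a = { 2, { 0x1000, 0x2000 } };
        struct task b = { 1, { 0x3000 } };

        switch_kmaps(&a, &b);
        printf("slot0=%#lx slot1=%#lx\n", cpu_slots[0], cpu_slots[1]);
        return 0;
    }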
- diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
- index b24b3c6d686e..02a062b0de5d 100644
- --- a/arch/x86/kvm/lapic.c
- +++ b/arch/x86/kvm/lapic.c
- @@ -1944,6 +1944,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_PINNED);
- apic->lapic_timer.timer.function = apic_timer_fn;
- + apic->lapic_timer.timer.irqsafe = 1;
-
- /*
- * APIC is created enabled. This will prevent kvm_lapic_set_base from
- diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
- index 73304b1a03cc..2a0fae2ef089 100644
- --- a/arch/x86/kvm/x86.c
- +++ b/arch/x86/kvm/x86.c
- @@ -5967,6 +5967,13 @@ int kvm_arch_init(void *opaque)
- goto out;
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
- + printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
- + return -EOPNOTSUPP;
- + }
- +#endif
- +
- r = kvm_mmu_module_init();
- if (r)
- goto out_free_percpu;
- diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
- index 6d18b70ed5a9..f752724c22e8 100644
- --- a/arch/x86/mm/highmem_32.c
- +++ b/arch/x86/mm/highmem_32.c
- @@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap);
- */
- void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
- + pte_t pte = mk_pte(page, prot);
- unsigned long vaddr;
- int idx, type;
-
- - preempt_disable();
- + preempt_disable_nort();
- pagefault_disable();
-
- if (!PageHighMem(page))
- @@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
- - set_pte(kmap_pte-idx, mk_pte(page, prot));
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = pte;
- +#endif
- + set_pte(kmap_pte-idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
- @@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr)
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = __pte(0);
- +#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- arch_flush_lazy_mmu_mode();
- @@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr)
- #endif
-
- pagefault_enable();
- - preempt_enable();
- + preempt_enable_nort();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
- diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
- index ada98b39b8ad..585f6829653b 100644
- --- a/arch/x86/mm/iomap_32.c
- +++ b/arch/x86/mm/iomap_32.c
- @@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free);
-
- void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- {
- + pte_t pte = pfn_pte(pfn, prot);
- unsigned long vaddr;
- int idx, type;
-
- @@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- - set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
- + WARN_ON(!pte_none(*(kmap_pte - idx)));
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = pte;
- +#endif
- + set_pte(kmap_pte - idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
- @@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr)
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + current->kmap_pte[type] = __pte(0);
- +#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- }
- diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
- index 73dcb0e18c1b..c1085c7ee212 100644
- --- a/arch/x86/mm/pageattr.c
- +++ b/arch/x86/mm/pageattr.c
- @@ -215,7 +215,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
- int in_flags, struct page **pages)
- {
- unsigned int i, level;
- +#ifdef CONFIG_PREEMPT
- + /*
- + * Avoid wbinvd() because it causes latencies on all CPUs,
- + * regardless of any CPU isolation that may be in effect.
- + */
- + unsigned long do_wbinvd = 0;
- +#else
- unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
- +#endif
-
- BUG_ON(irqs_disabled());
-
- diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
- index 0f0175186f1b..39b5d5b2627d 100644
- --- a/arch/x86/platform/uv/tlb_uv.c
- +++ b/arch/x86/platform/uv/tlb_uv.c
- @@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
-
- quiesce_local_uvhub(hmaster);
-
- - spin_lock(&hmaster->queue_lock);
- + raw_spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
- - spin_unlock(&hmaster->queue_lock);
- + raw_spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
- @@ -770,9 +770,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
-
- quiesce_local_uvhub(hmaster);
-
- - spin_lock(&hmaster->queue_lock);
- + raw_spin_lock(&hmaster->queue_lock);
- reset_with_ipi(&bau_desc->distribution, bcp);
- - spin_unlock(&hmaster->queue_lock);
- + raw_spin_unlock(&hmaster->queue_lock);
-
- end_uvhub_quiesce(hmaster);
-
- @@ -793,7 +793,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
- cycles_t tm1;
-
- hmaster = bcp->uvhub_master;
- - spin_lock(&hmaster->disable_lock);
- + raw_spin_lock(&hmaster->disable_lock);
- if (!bcp->baudisabled) {
- stat->s_bau_disabled++;
- tm1 = get_cycles();
- @@ -806,7 +806,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
- }
- }
- }
- - spin_unlock(&hmaster->disable_lock);
- + raw_spin_unlock(&hmaster->disable_lock);
- }
-
- static void count_max_concurr(int stat, struct bau_control *bcp,
- @@ -869,7 +869,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
- */
- static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
- {
- - spinlock_t *lock = &hmaster->uvhub_lock;
- + raw_spinlock_t *lock = &hmaster->uvhub_lock;
- atomic_t *v;
-
- v = &hmaster->active_descriptor_count;
- @@ -1002,7 +1002,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
- struct bau_control *hmaster;
-
- hmaster = bcp->uvhub_master;
- - spin_lock(&hmaster->disable_lock);
- + raw_spin_lock(&hmaster->disable_lock);
- if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
- stat->s_bau_reenabled++;
- for_each_present_cpu(tcpu) {
- @@ -1014,10 +1014,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
- tbcp->period_giveups = 0;
- }
- }
- - spin_unlock(&hmaster->disable_lock);
- + raw_spin_unlock(&hmaster->disable_lock);
- return 0;
- }
- - spin_unlock(&hmaster->disable_lock);
- + raw_spin_unlock(&hmaster->disable_lock);
- return -1;
- }
-
- @@ -1939,9 +1939,9 @@ static void __init init_per_cpu_tunables(void)
- bcp->cong_reps = congested_reps;
- bcp->disabled_period = sec_2_cycles(disabled_period);
- bcp->giveup_limit = giveup_limit;
- - spin_lock_init(&bcp->queue_lock);
- - spin_lock_init(&bcp->uvhub_lock);
- - spin_lock_init(&bcp->disable_lock);
- + raw_spin_lock_init(&bcp->queue_lock);
- + raw_spin_lock_init(&bcp->uvhub_lock);
- + raw_spin_lock_init(&bcp->disable_lock);
- }
- }
-
- diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
- index b333fc45f9ec..8b85916e6986 100644
- --- a/arch/x86/platform/uv/uv_time.c
- +++ b/arch/x86/platform/uv/uv_time.c
- @@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
-
- /* There is one of these allocated per node */
- struct uv_rtc_timer_head {
- - spinlock_t lock;
- + raw_spinlock_t lock;
- /* next cpu waiting for timer, local node relative: */
- int next_cpu;
- /* number of cpus on this node: */
- @@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
- uv_rtc_deallocate_timers();
- return -ENOMEM;
- }
- - spin_lock_init(&head->lock);
- + raw_spin_lock_init(&head->lock);
- head->ncpus = uv_blade_nr_possible_cpus(bid);
- head->next_cpu = -1;
- blade_info[bid] = head;
- @@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
- unsigned long flags;
- int next_cpu;
-
- - spin_lock_irqsave(&head->lock, flags);
- + raw_spin_lock_irqsave(&head->lock, flags);
-
- next_cpu = head->next_cpu;
- *t = expires;
- @@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
- if (uv_setup_intr(cpu, expires)) {
- *t = ULLONG_MAX;
- uv_rtc_find_next_timer(head, pnode);
- - spin_unlock_irqrestore(&head->lock, flags);
- + raw_spin_unlock_irqrestore(&head->lock, flags);
- return -ETIME;
- }
- }
-
- - spin_unlock_irqrestore(&head->lock, flags);
- + raw_spin_unlock_irqrestore(&head->lock, flags);
- return 0;
- }
-
- @@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
- unsigned long flags;
- int rc = 0;
-
- - spin_lock_irqsave(&head->lock, flags);
- + raw_spin_lock_irqsave(&head->lock, flags);
-
- if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
- rc = 1;
- @@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
- uv_rtc_find_next_timer(head, pnode);
- }
-
- - spin_unlock_irqrestore(&head->lock, flags);
- + raw_spin_unlock_irqrestore(&head->lock, flags);
-
- return rc;
- }
- @@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
- static cycle_t uv_read_rtc(struct clocksource *cs)
- {
- unsigned long offset;
- + cycle_t cycles;
-
- + preempt_disable();
- if (uv_get_min_hub_revision_id() == 1)
- offset = 0;
- else
- offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
-
- - return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
- + cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
- + preempt_enable();
- +
- + return cycles;
- }
-
- /*
- diff --git a/block/blk-core.c b/block/blk-core.c
- index 23daf40be371..e8341f78f119 100644
- --- a/block/blk-core.c
- +++ b/block/blk-core.c
- @@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
-
- INIT_LIST_HEAD(&rq->queuelist);
- INIT_LIST_HEAD(&rq->timeout_list);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
- +#endif
- rq->cpu = -1;
- rq->q = q;
- rq->__sector = (sector_t) -1;
- @@ -233,7 +236,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
- **/
- void blk_start_queue(struct request_queue *q)
- {
- - WARN_ON(!in_interrupt() && !irqs_disabled());
- + WARN_ON_NONRT(!in_interrupt() && !irqs_disabled());
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
- @@ -660,7 +663,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
- if (nowait)
- return -EBUSY;
-
- - ret = wait_event_interruptible(q->mq_freeze_wq,
- + ret = swait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
- blk_queue_dying(q));
- if (blk_queue_dying(q))
- @@ -680,7 +683,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
- struct request_queue *q =
- container_of(ref, struct request_queue, q_usage_counter);
-
- - wake_up_all(&q->mq_freeze_wq);
- + swake_up_all(&q->mq_freeze_wq);
- }
-
- static void blk_rq_timed_out_timer(unsigned long data)
- @@ -750,7 +753,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
- q->bypass_depth = 1;
- __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
-
- - init_waitqueue_head(&q->mq_freeze_wq);
- + init_swait_queue_head(&q->mq_freeze_wq);
-
- /*
- * Init percpu_ref in atomic mode so that it's faster to shutdown.
- @@ -3202,7 +3205,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
- blk_run_queue_async(q);
- else
- __blk_run_queue(q);
- - spin_unlock(q->queue_lock);
- + spin_unlock_irq(q->queue_lock);
- }
-
- static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
- @@ -3250,7 +3253,6 @@ EXPORT_SYMBOL(blk_check_plugged);
- void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- {
- struct request_queue *q;
- - unsigned long flags;
- struct request *rq;
- LIST_HEAD(list);
- unsigned int depth;
- @@ -3270,11 +3272,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- q = NULL;
- depth = 0;
-
- - /*
- - * Save and disable interrupts here, to avoid doing it for every
- - * queue lock we have to take.
- - */
- - local_irq_save(flags);
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
- @@ -3287,7 +3284,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- queue_unplugged(q, depth, from_schedule);
- q = rq->q;
- depth = 0;
- - spin_lock(q->queue_lock);
- + spin_lock_irq(q->queue_lock);
- }
-
- /*
- @@ -3314,8 +3311,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
- -
- - local_irq_restore(flags);
- }
-
- void blk_finish_plug(struct blk_plug *plug)
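The blk-core hunks above (and the blk-mq ones below) move the queue-freeze logic from classic waitqueues to simple wait queues: swake_up_all() is safe from the atomic context where the percpu_ref release fires, because swait queues use a raw lock and do bounded wakeups. The wait/wake shape itself is unchanged, as this pthread sketch of "make the condition true, then wake" shows (a condvar merely plays the swait queue's role here):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool unfrozen;

    static void *waiter(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!unfrozen)                  /* swait_event(wq, condition) */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("queue unfrozen\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        pthread_mutex_lock(&lock);
        unfrozen = true;                   /* make the condition true ... */
        pthread_cond_broadcast(&cond);     /* ... then swake_up_all(wq) */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }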
- diff --git a/block/blk-ioc.c b/block/blk-ioc.c
- index 381cb50a673c..dc8785233d94 100644
- --- a/block/blk-ioc.c
- +++ b/block/blk-ioc.c
- @@ -7,6 +7,7 @@
- #include <linux/bio.h>
- #include <linux/blkdev.h>
- #include <linux/slab.h>
- +#include <linux/delay.h>
-
- #include "blk.h"
-
- @@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work)
- spin_unlock(q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
- - cpu_relax();
- + cpu_chill();
- spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- }
- }
- @@ -187,7 +188,7 @@ void put_io_context_active(struct io_context *ioc)
- spin_unlock(icq->q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
- - cpu_relax();
- + cpu_chill();
- goto retry;
- }
- }
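
The two blk-ioc.c hunks above replace cpu_relax() with cpu_chill() in trylock-retry loops. On a stock kernel the lock holder runs in a context the spinner cannot preempt, so briefly busy-waiting is fine; on RT the holder is an ordinary preemptible task, and a higher-priority spinner can starve it indefinitely. cpu_chill() therefore sleeps for a short period instead of spinning. A rough userspace sketch of the two behaviours; the real cpu_chill() is hrtimer-based, which is not reproduced here.

#include <time.h>

/* Busy-wait hint: only safe when the lock holder cannot be preempted
 * by the task that is spinning. */
static inline void relax(void)
{
	__asm__ __volatile__("" ::: "memory"); /* stand-in for cpu_relax() */
}

/* Sleep briefly instead, so a preempted lock holder gets CPU time. */
static inline void chill(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 }; /* ~1 ms */

	nanosleep(&ts, NULL);
}
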
- diff --git a/block/blk-mq.c b/block/blk-mq.c
- index 10f8f94b7f20..82500641f37b 100644
- --- a/block/blk-mq.c
- +++ b/block/blk-mq.c
- @@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
-
- static void blk_mq_freeze_queue_wait(struct request_queue *q)
- {
- - wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
- + swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
- }
-
- /*
- @@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
- WARN_ON_ONCE(freeze_depth < 0);
- if (!freeze_depth) {
- percpu_ref_reinit(&q->q_usage_counter);
- - wake_up_all(&q->mq_freeze_wq);
- + swake_up_all(&q->mq_freeze_wq);
- }
- }
- EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
- @@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
- * dying, we need to ensure that processes currently waiting on
- * the queue are notified as well.
- */
- - wake_up_all(&q->mq_freeze_wq);
- + swake_up_all(&q->mq_freeze_wq);
- }
-
- bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
- @@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
- rq->resid_len = 0;
- rq->sense = NULL;
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
- +#endif
- INIT_LIST_HEAD(&rq->timeout_list);
- rq->timeout = 0;
-
- @@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *rq, int error)
- }
- EXPORT_SYMBOL(blk_mq_end_request);
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +
- +void __blk_mq_complete_request_remote_work(struct work_struct *work)
- +{
- + struct request *rq = container_of(work, struct request, work);
- +
- + rq->q->softirq_done_fn(rq);
- +}
- +
- +#else
- +
- static void __blk_mq_complete_request_remote(void *data)
- {
- struct request *rq = data;
- @@ -352,6 +366,8 @@ static void __blk_mq_complete_request_remote(void *data)
- rq->q->softirq_done_fn(rq);
- }
-
- +#endif
- +
- static void blk_mq_ipi_complete_request(struct request *rq)
- {
- struct blk_mq_ctx *ctx = rq->mq_ctx;
- @@ -363,19 +379,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
- return;
- }
-
- - cpu = get_cpu();
- + cpu = get_cpu_light();
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
- shared = cpus_share_cache(cpu, ctx->cpu);
-
- if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + schedule_work_on(ctx->cpu, &rq->work);
- +#else
- rq->csd.func = __blk_mq_complete_request_remote;
- rq->csd.info = rq;
- rq->csd.flags = 0;
- smp_call_function_single_async(ctx->cpu, &rq->csd);
- +#endif
- } else {
- rq->q->softirq_done_fn(rq);
- }
- - put_cpu();
- + put_cpu_light();
- }
-
- static void __blk_mq_complete_request(struct request *rq)
- @@ -906,14 +926,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
- return;
-
- if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
- - int cpu = get_cpu();
- + int cpu = get_cpu_light();
- if (cpumask_test_cpu(cpu, hctx->cpumask)) {
- __blk_mq_run_hw_queue(hctx);
- - put_cpu();
- + put_cpu_light();
- return;
- }
-
- - put_cpu();
- + put_cpu_light();
- }
-
- kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
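
On PREEMPT_RT_FULL the hunks above stop using smp_call_function_single_async() for remote completions: an IPI runs its callback in hard interrupt context, which the RT tree keeps to a minimum, so each request instead carries a work item (the INIT_WORK() added in blk_rq_init() and blk_mq_rq_ctx_init()) that is queued on the submitting CPU with schedule_work_on(). A compact userspace analogue of "complete on the submitter by queueing work for it rather than interrupting it"; names are invented, build with -pthread.

#include <pthread.h>
#include <stdio.h>

struct request {
	struct request *next;
	int id;
};

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t qcond = PTHREAD_COND_INITIALIZER;
static struct request *pending;

static void complete_remote(struct request *rq)  /* ~schedule_work_on() */
{
	pthread_mutex_lock(&qlock);
	rq->next = pending;
	pending = rq;
	pthread_cond_signal(&qcond);
	pthread_mutex_unlock(&qlock);
}

static void *submitter(void *arg)                /* runs softirq_done_fn */
{
	struct request *rq;

	pthread_mutex_lock(&qlock);
	while (!pending)
		pthread_cond_wait(&qcond, &qlock);
	rq = pending;
	pending = rq->next;
	pthread_mutex_unlock(&qlock);
	printf("completed request %d\n", rq->id);
	return NULL;
}

int main(void)
{
	struct request rq = { .id = 1 };
	pthread_t t;

	pthread_create(&t, NULL, submitter, NULL);
	complete_remote(&rq);
	pthread_join(t, NULL);
	return 0;
}
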
- diff --git a/block/blk-mq.h b/block/blk-mq.h
- index c55bcf67b956..c26a84d44cc4 100644
- --- a/block/blk-mq.h
- +++ b/block/blk-mq.h
- @@ -73,12 +73,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
- */
- static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
- {
- - return __blk_mq_get_ctx(q, get_cpu());
- + return __blk_mq_get_ctx(q, get_cpu_light());
- }
-
- static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
- {
- - put_cpu();
- + put_cpu_light();
- }
-
- struct blk_mq_alloc_data {
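
blk_mq_get_ctx()/blk_mq_put_ctx() now use the _light variants. The difference, sketched from the definitions the RT patch adds to linux/smp.h (details may vary by patch version): get_cpu() disables preemption, so the section in between must not sleep, while get_cpu_light() only disables migration, keeping the CPU id stable while still allowing the task to block on the sleeping spinlocks RT introduces.

/* Sketch only; not buildable outside a kernel tree carrying the patch. */
#ifndef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()  get_cpu()    /* preempt_disable() + CPU id */
# define put_cpu_light()  put_cpu()    /* preempt_enable() */
#else
# define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()  migrate_enable()
#endif
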
- diff --git a/block/blk-softirq.c b/block/blk-softirq.c
- index 06cf9807f49a..c40342643ca0 100644
- --- a/block/blk-softirq.c
- +++ b/block/blk-softirq.c
- @@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
-
- /*
- @@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
- this_cpu_ptr(&blk_cpu_done));
- raise_softirq_irqoff(BLOCK_SOFTIRQ);
- local_irq_enable();
- + preempt_check_resched_rt();
-
- return 0;
- }
- @@ -141,6 +143,7 @@ void __blk_complete_request(struct request *req)
- goto do_local;
-
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
-
- /**
- diff --git a/block/bounce.c b/block/bounce.c
- index 1cb5dd3a5da1..2f1ec8a67cbe 100644
- --- a/block/bounce.c
- +++ b/block/bounce.c
- @@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
- unsigned long flags;
- unsigned char *vto;
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- vto = kmap_atomic(to->bv_page);
- memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
-
- #else /* CONFIG_HIGHMEM */
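
bounce_copy_vec() above, like the later libata and IDE hunks, switches to the _nort ("no RT") IRQ macros. On a non-RT kernel these are the ordinary local_irq_save()/restore(); on RT they degrade to near no-ops, because the section only needs atomicity against a hard interrupt handler that RT has moved into a thread anyway, and the kmap_atomic() region stays preemptible. Roughly, as a sketch of the patch's definitions rather than the authoritative header:

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()       barrier()
# define local_irq_enable_nort()        barrier()
# define local_irq_save_nort(flags)     local_save_flags(flags)
# define local_irq_restore_nort(flags)  (void)(flags)
#else
# define local_irq_disable_nort()       local_irq_disable()
# define local_irq_enable_nort()        local_irq_enable()
# define local_irq_save_nort(flags)     local_irq_save(flags)
# define local_irq_restore_nort(flags)  local_irq_restore(flags)
#endif
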
- diff --git a/crypto/algapi.c b/crypto/algapi.c
- index 1fad2a6b3bbb..ecb7315426a9 100644
- --- a/crypto/algapi.c
- +++ b/crypto/algapi.c
- @@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
-
- int crypto_register_notifier(struct notifier_block *nb)
- {
- - return blocking_notifier_chain_register(&crypto_chain, nb);
- + return srcu_notifier_chain_register(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_register_notifier);
-
- int crypto_unregister_notifier(struct notifier_block *nb)
- {
- - return blocking_notifier_chain_unregister(&crypto_chain, nb);
- + return srcu_notifier_chain_unregister(&crypto_chain, nb);
- }
- EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-
- diff --git a/crypto/api.c b/crypto/api.c
- index bbc147cb5dec..bc1a848f02ec 100644
- --- a/crypto/api.c
- +++ b/crypto/api.c
- @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
- DECLARE_RWSEM(crypto_alg_sem);
- EXPORT_SYMBOL_GPL(crypto_alg_sem);
-
- -BLOCKING_NOTIFIER_HEAD(crypto_chain);
- +SRCU_NOTIFIER_HEAD(crypto_chain);
- EXPORT_SYMBOL_GPL(crypto_chain);
-
- static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
- @@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v)
- {
- int ok;
-
- - ok = blocking_notifier_call_chain(&crypto_chain, val, v);
- + ok = srcu_notifier_call_chain(&crypto_chain, val, v);
- if (ok == NOTIFY_DONE) {
- request_module("cryptomgr");
- - ok = blocking_notifier_call_chain(&crypto_chain, val, v);
- + ok = srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- return ok;
- diff --git a/crypto/internal.h b/crypto/internal.h
- index 7eefcdb00227..0ecc7f5a2f40 100644
- --- a/crypto/internal.h
- +++ b/crypto/internal.h
- @@ -47,7 +47,7 @@ struct crypto_larval {
-
- extern struct list_head crypto_alg_list;
- extern struct rw_semaphore crypto_alg_sem;
- -extern struct blocking_notifier_head crypto_chain;
- +extern struct srcu_notifier_head crypto_chain;
-
- #ifdef CONFIG_PROC_FS
- void __init crypto_init_proc(void);
- @@ -146,7 +146,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
-
- static inline void crypto_notify(unsigned long val, void *v)
- {
- - blocking_notifier_call_chain(&crypto_chain, val, v);
- + srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- #endif /* _CRYPTO_INTERNAL_H */
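
The crypto hunks convert the notifier chain from a blocking chain, which serializes callers on an rwsem, to an SRCU chain, whose call path runs on the SRCU read side and never contends with registration; on RT this sidesteps sleeping-lock interactions when crypto_probing_notify() is reached from awkward contexts. The resulting register/call/unregister pattern, shown with invented names (kernel-only API, not buildable standalone):

SRCU_NOTIFIER_HEAD(example_chain);

static int example_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	return NOTIFY_OK;
}

static struct notifier_block example_nb = { .notifier_call = example_cb };

static void example(void)
{
	srcu_notifier_chain_register(&example_chain, &example_nb);
	srcu_notifier_call_chain(&example_chain, 0, NULL);
	srcu_notifier_chain_unregister(&example_chain, &example_nb);
}
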
- diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
- index 750fa824d42c..441edf51484a 100644
- --- a/drivers/acpi/acpica/acglobal.h
- +++ b/drivers/acpi/acpica/acglobal.h
- @@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
- * interrupt level
- */
- ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
- -ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
- +ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
- ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
-
- /* Mutex for _OSI support */
- diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
- index 3b7fb99362b6..696bf8e62afb 100644
- --- a/drivers/acpi/acpica/hwregs.c
- +++ b/drivers/acpi/acpica/hwregs.c
- @@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(void)
- ACPI_BITMASK_ALL_FIXED_STATUS,
- ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
-
- - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
- + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
-
- /* Clear the fixed events in PM1 A/B */
-
- status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
- ACPI_BITMASK_ALL_FIXED_STATUS);
-
- - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
- + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
-
- if (ACPI_FAILURE(status)) {
- goto exit;
- diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
- index 98c26ff39409..6e236f2ea791 100644
- --- a/drivers/acpi/acpica/hwxface.c
- +++ b/drivers/acpi/acpica/hwxface.c
- @@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
- + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
-
- /*
- * At this point, we know that the parent register is one of the
- @@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value)
-
- unlock_and_exit:
-
- - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
- + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
- return_ACPI_STATUS(status);
- }
-
- diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
- index 15073375bd00..357e7ca5a587 100644
- --- a/drivers/acpi/acpica/utmutex.c
- +++ b/drivers/acpi/acpica/utmutex.c
- @@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void)
- return_ACPI_STATUS (status);
- }
-
- - status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
- + status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
- if (ACPI_FAILURE (status)) {
- return_ACPI_STATUS (status);
- }
- @@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void)
- /* Delete the spinlocks */
-
- acpi_os_delete_lock(acpi_gbl_gpe_lock);
- - acpi_os_delete_lock(acpi_gbl_hardware_lock);
- + acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
- acpi_os_delete_lock(acpi_gbl_reference_count_lock);
-
- /* Delete the reader/writer lock */
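
The ACPICA hunks turn acpi_gbl_hardware_lock from a spinlock_t, which PREEMPT_RT_FULL turns into a sleeping rtmutex, into a raw_spinlock_t, which remains a genuine IRQ-disabling spinlock in every configuration; the hardware lock is taken on low-level paths that must stay atomic. The usage pattern the conversion lands on, as a kernel-flavoured sketch:

static DEFINE_RAW_SPINLOCK(hw_lock);   /* never sleeps, even on RT */

static void touch_hw_regs(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);
	/* short, bounded register access only; nothing here may sleep */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}
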
- diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
- index 8d22acdf90f0..64fbad747da9 100644
- --- a/drivers/ata/libata-sff.c
- +++ b/drivers/ata/libata-sff.c
- @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
- unsigned long flags;
- unsigned int consumed;
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- return consumed;
- }
- @@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
- unsigned long flags;
-
- /* FIXME: use a bounce buffer */
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- buf = kmap_atomic(page);
-
- /* do the actual data transfer */
- @@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
- do_write);
-
- kunmap_atomic(buf);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- } else {
- buf = page_address(page);
- ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
- @@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
- unsigned long flags;
-
- /* FIXME: use bounce buffer */
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- buf = kmap_atomic(page);
-
- /* do the actual data transfer */
- @@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
- count, rw);
-
- kunmap_atomic(buf);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- } else {
- buf = page_address(page);
- consumed = ap->ops->sff_data_xfer(dev, buf + offset,
- diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
- index 4b5cd3a7b2b6..8c93ee150ee8 100644
- --- a/drivers/block/zram/zcomp.c
- +++ b/drivers/block/zram/zcomp.c
- @@ -118,12 +118,20 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
-
- struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
- {
- - return *get_cpu_ptr(comp->stream);
- + struct zcomp_strm *zstrm;
- +
- + zstrm = *get_local_ptr(comp->stream);
- + spin_lock(&zstrm->zcomp_lock);
- + return zstrm;
- }
-
- void zcomp_stream_put(struct zcomp *comp)
- {
- - put_cpu_ptr(comp->stream);
- + struct zcomp_strm *zstrm;
- +
- + zstrm = *this_cpu_ptr(comp->stream);
- + spin_unlock(&zstrm->zcomp_lock);
- + put_local_ptr(zstrm);
- }
-
- int zcomp_compress(struct zcomp_strm *zstrm,
- @@ -174,6 +182,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
- pr_err("Can't allocate a compression stream\n");
- return NOTIFY_BAD;
- }
- + spin_lock_init(&zstrm->zcomp_lock);
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- break;
- case CPU_DEAD:
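
zcomp_stream_get() previously pinned the caller with get_cpu_ptr(), i.e. preemption stayed off for the whole compression, far too long for RT. The hunks above give each per-CPU stream its own spinlock (a sleeping lock on RT) and use get_local_ptr() so that only migration is disabled. A userspace analogue of the per-CPU-object-with-its-own-lock pattern; all names are invented, call slots_init() once first, build with -pthread.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

#define NSLOTS 8

struct strm {
	pthread_mutex_t lock;
	char buf[4096];
};

static struct strm slots[NSLOTS];

static void slots_init(void)
{
	for (int i = 0; i < NSLOTS; i++)
		pthread_mutex_init(&slots[i].lock, NULL);
}

static struct strm *stream_get(void)
{
	int cpu = sched_getcpu();
	/* Picking by CPU is a locality optimisation, not a correctness
	 * requirement: the per-slot lock keeps this safe even if the
	 * thread migrates right after the lookup. */
	struct strm *s = &slots[(cpu < 0 ? 0 : cpu) % NSLOTS];

	pthread_mutex_lock(&s->lock);
	return s;
}

static void stream_put(struct strm *s)
{
	pthread_mutex_unlock(&s->lock);
}
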
- diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
- index 478cac2ed465..f7a6efdc3285 100644
- --- a/drivers/block/zram/zcomp.h
- +++ b/drivers/block/zram/zcomp.h
- @@ -14,6 +14,7 @@ struct zcomp_strm {
- /* compression/decompression buffer */
- void *buffer;
- struct crypto_comp *tfm;
- + spinlock_t zcomp_lock;
- };
-
- /* dynamic per-device compression frontend */
- diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
- index b7c0b69a02f5..47d033b8a966 100644
- --- a/drivers/block/zram/zram_drv.c
- +++ b/drivers/block/zram/zram_drv.c
- @@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
- goto out_error;
- }
-
- + zram_meta_init_table_locks(meta, disksize);
- +
- return meta;
-
- out_error:
- @@ -575,28 +577,28 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
- struct zram_meta *meta = zram->meta;
- unsigned long handle;
- unsigned int size;
- + struct zcomp_strm *zstrm;
-
- - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_lock_table(&meta->table[index]);
- handle = meta->table[index].handle;
- size = zram_get_obj_size(meta, index);
-
- if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
- memset(mem, 0, PAGE_SIZE);
- return 0;
- }
-
- + zstrm = zcomp_stream_get(zram->comp);
- cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- memcpy(mem, cmem, PAGE_SIZE);
- } else {
- - struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
- -
- ret = zcomp_decompress(zstrm, cmem, size, mem);
- - zcomp_stream_put(zram->comp);
- }
- zs_unmap_object(meta->mem_pool, handle);
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zcomp_stream_put(zram->comp);
- + zram_unlock_table(&meta->table[index]);
-
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret)) {
- @@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- struct zram_meta *meta = zram->meta;
- page = bvec->bv_page;
-
- - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_lock_table(&meta->table[index]);
- if (unlikely(!meta->table[index].handle) ||
- zram_test_flag(meta, index, ZRAM_ZERO)) {
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
- handle_zero_page(bvec);
- return 0;
- }
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
-
- if (is_partial_io(bvec))
- /* Use a temporary buffer to decompress the page */
- @@ -700,10 +702,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
- if (user_mem)
- kunmap_atomic(user_mem);
- /* Free memory associated with this sector now. */
- - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_lock_table(&meta->table[index]);
- zram_free_page(zram, index);
- zram_set_flag(meta, index, ZRAM_ZERO);
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
-
- atomic64_inc(&zram->stats.zero_pages);
- ret = 0;
- @@ -794,12 +796,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
- - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_lock_table(&meta->table[index]);
- zram_free_page(zram, index);
-
- meta->table[index].handle = handle;
- zram_set_obj_size(meta, index, clen);
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
-
- /* Update stats */
- atomic64_add(clen, &zram->stats.compr_data_size);
- @@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
- }
-
- while (n >= PAGE_SIZE) {
- - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_lock_table(&meta->table[index]);
- zram_free_page(zram, index);
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
- atomic64_inc(&zram->stats.notify_free);
- index++;
- n -= PAGE_SIZE;
- @@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
- zram = bdev->bd_disk->private_data;
- meta = zram->meta;
-
- - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_lock_table(&meta->table[index]);
- zram_free_page(zram, index);
- - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- + zram_unlock_table(&meta->table[index]);
- atomic64_inc(&zram->stats.notify_free);
- }
-
- diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
- index 74fcf10da374..fd4020c99b9e 100644
- --- a/drivers/block/zram/zram_drv.h
- +++ b/drivers/block/zram/zram_drv.h
- @@ -73,6 +73,9 @@ enum zram_pageflags {
- struct zram_table_entry {
- unsigned long handle;
- unsigned long value;
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + spinlock_t lock;
- +#endif
- };
-
- struct zram_stats {
- @@ -120,4 +123,42 @@ struct zram {
- */
- bool claim; /* Protected by bdev->bd_mutex */
- };
- +
- +#ifndef CONFIG_PREEMPT_RT_BASE
- +static inline void zram_lock_table(struct zram_table_entry *table)
- +{
- + bit_spin_lock(ZRAM_ACCESS, &table->value);
- +}
- +
- +static inline void zram_unlock_table(struct zram_table_entry *table)
- +{
- + bit_spin_unlock(ZRAM_ACCESS, &table->value);
- +}
- +
- +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
- +#else /* CONFIG_PREEMPT_RT_BASE */
- +static inline void zram_lock_table(struct zram_table_entry *table)
- +{
- + spin_lock(&table->lock);
- + __set_bit(ZRAM_ACCESS, &table->value);
- +}
- +
- +static inline void zram_unlock_table(struct zram_table_entry *table)
- +{
- + __clear_bit(ZRAM_ACCESS, &table->value);
- + spin_unlock(&table->lock);
- +}
- +
- +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
- +{
- + size_t num_pages = disksize >> PAGE_SHIFT;
- + size_t index;
- +
- + for (index = 0; index < num_pages; index++) {
- + spinlock_t *lock = &meta->table[index].lock;
- + spin_lock_init(lock);
- + }
- +}
- +#endif /* CONFIG_PREEMPT_RT_BASE */
- +
- #endif
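
The header above swaps bit_spin_lock() for a real spinlock_t per table entry on PREEMPT_RT_BASE. A bit spinlock records no owner, so RT's priority inheritance cannot boost the holder and a higher-priority waiter would spin against a preempted one; the real lock fixes that at the cost of one lock per entry. A small runnable illustration of that size trade-off, using userspace stand-ins rather than the kernel types:

#include <stdio.h>
#include <pthread.h>

struct entry_nort { unsigned long handle, value; };
struct entry_rt   { unsigned long handle, value; pthread_mutex_t lock; };

int main(void)
{
	printf("per-entry cost: %zu -> %zu bytes\n",
	       sizeof(struct entry_nort), sizeof(struct entry_rt));
	return 0;
}
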
- diff --git a/drivers/char/random.c b/drivers/char/random.c
- index 08d1dd58c0d2..25ee319dc8e3 100644
- --- a/drivers/char/random.c
- +++ b/drivers/char/random.c
- @@ -262,6 +262,7 @@
- #include <linux/syscalls.h>
- #include <linux/completion.h>
- #include <linux/uuid.h>
- +#include <linux/locallock.h>
- #include <crypto/chacha20.h>
-
- #include <asm/processor.h>
- @@ -1028,8 +1029,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
- } sample;
- long delta, delta2, delta3;
-
- - preempt_disable();
- -
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
- @@ -1070,7 +1069,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
- */
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
- }
- - preempt_enable();
- }
-
- void add_input_randomness(unsigned int type, unsigned int code,
- @@ -1123,28 +1121,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
- return *(ptr + f->reg_idx++);
- }
-
- -void add_interrupt_randomness(int irq, int irq_flags)
- +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
- {
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- - struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- - __u64 ip;
- unsigned long seed;
- int credit = 0;
-
- if (cycles == 0)
- - cycles = get_reg(fast_pool, regs);
- + cycles = get_reg(fast_pool, NULL);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
- - ip = regs ? instruction_pointer(regs) : _RET_IP_;
- + if (!ip)
- + ip = _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- - get_reg(fast_pool, regs);
- + get_reg(fast_pool, NULL);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
- @@ -2056,6 +2053,7 @@ struct batched_entropy {
- * goal of being quite fast and not depleting entropy.
- */
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
- +static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock);
- unsigned long get_random_long(void)
- {
- unsigned long ret;
- @@ -2064,13 +2062,13 @@ unsigned long get_random_long(void)
- if (arch_get_random_long(&ret))
- return ret;
-
- - batch = &get_cpu_var(batched_entropy_long);
- + batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long);
- if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
- extract_crng((u8 *)batch->entropy_long);
- batch->position = 0;
- }
- ret = batch->entropy_long[batch->position++];
- - put_cpu_var(batched_entropy_long);
- + put_locked_var(batched_entropy_long_lock, batched_entropy_long);
- return ret;
- }
- EXPORT_SYMBOL(get_random_long);
- @@ -2082,6 +2080,8 @@ unsigned int get_random_int(void)
- }
- #else
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
- +static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock);
- +
- unsigned int get_random_int(void)
- {
- unsigned int ret;
- @@ -2090,13 +2090,13 @@ unsigned int get_random_int(void)
- if (arch_get_random_int(&ret))
- return ret;
-
- - batch = &get_cpu_var(batched_entropy_int);
- + batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int);
- if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
- extract_crng((u8 *)batch->entropy_int);
- batch->position = 0;
- }
- ret = batch->entropy_int[batch->position++];
- - put_cpu_var(batched_entropy_int);
- + put_locked_var(batched_entropy_int_lock, batched_entropy_int);
- return ret;
- }
- #endif
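
The random.c hunks drop the preempt_disable() section around timer-entropy collection and replace get_cpu_var() with get_locked_var() on a local IRQ lock, because running extract_crng() under preempt_disable() is too long a non-preemptible window for RT. DEFINE_LOCAL_IRQ_LOCK() comes from the patch's locallock API; a rough summary of how the calls map, hedged as a sketch rather than the exact header:

/* struct batch is a stand-in name for the per-CPU payload. */
struct batch { int pos; };

static DEFINE_LOCAL_IRQ_LOCK(batch_lock);
static DEFINE_PER_CPU(struct batch, batches);

static void use_batch(void)
{
	/* !RT: behaves like get_cpu_var() (preemption off);
	 *  RT: takes a per-CPU sleeping lock instead, so this
	 *      section may block without losing mutual exclusion. */
	struct batch *b = &get_locked_var(batch_lock, batches);

	b->pos++;
	put_locked_var(batch_lock, batches);
}
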
- diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
- index 8022bea27fed..247330efd310 100644
- --- a/drivers/char/tpm/tpm_tis.c
- +++ b/drivers/char/tpm/tpm_tis.c
- @@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
- return container_of(data, struct tpm_tis_tcg_phy, priv);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +/*
- + * Flushes previous write operations to the chip so that subsequent
- + * ioread*()s won't stall a CPU.
- + */
- +static inline void tpm_tis_flush(void __iomem *iobase)
- +{
- + ioread8(iobase + TPM_ACCESS(0));
- +}
- +#else
- +#define tpm_tis_flush(iobase) do { } while (0)
- +#endif
- +
- +static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
- +{
- + iowrite8(b, iobase + addr);
- + tpm_tis_flush(iobase);
- +}
- +
- +static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
- +{
- + iowrite32(b, iobase + addr);
- + tpm_tis_flush(iobase);
- +}
- +
- static bool interrupts = true;
- module_param(interrupts, bool, 0444);
- MODULE_PARM_DESC(interrupts, "Enable interrupts");
- @@ -103,7 +128,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
- while (len--)
- - iowrite8(*value++, phy->iobase + addr);
- + tpm_tis_iowrite8(*value++, phy->iobase, addr);
- return 0;
- }
-
- @@ -127,7 +152,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
- {
- struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
- - iowrite32(value, phy->iobase + addr);
- + tpm_tis_iowrite32(value, phy->iobase, addr);
- return 0;
- }
-
- diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
- index 4da2af9694a2..5b6f57f500b8 100644
- --- a/drivers/clocksource/tcb_clksrc.c
- +++ b/drivers/clocksource/tcb_clksrc.c
- @@ -23,8 +23,7 @@
- * this 32 bit free-running counter. the second channel is not used.
- *
- * - The third channel may be used to provide a 16-bit clockevent
- - * source, used in either periodic or oneshot mode. This runs
- - * at 32 KiHZ, and can handle delays of up to two seconds.
- + * source, used in either periodic or oneshot mode.
- *
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
- @@ -74,6 +73,8 @@ static struct clocksource clksrc = {
- struct tc_clkevt_device {
- struct clock_event_device clkevt;
- struct clk *clk;
- + bool clk_enabled;
- + u32 freq;
- void __iomem *regs;
- };
-
- @@ -82,15 +83,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
- return container_of(clkevt, struct tc_clkevt_device, clkevt);
- }
-
- -/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- - * because using one of the divided clocks would usually mean the
- - * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- - *
- - * A divided clock could be good for high resolution timers, since
- - * 30.5 usec resolution can seem "low".
- - */
- static u32 timer_clock;
-
- +static void tc_clk_disable(struct clock_event_device *d)
- +{
- + struct tc_clkevt_device *tcd = to_tc_clkevt(d);
- +
- + clk_disable(tcd->clk);
- + tcd->clk_enabled = false;
- +}
- +
- +static void tc_clk_enable(struct clock_event_device *d)
- +{
- + struct tc_clkevt_device *tcd = to_tc_clkevt(d);
- +
- + if (tcd->clk_enabled)
- + return;
- + clk_enable(tcd->clk);
- + tcd->clk_enabled = true;
- +}
- +
- static int tc_shutdown(struct clock_event_device *d)
- {
- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
- @@ -98,8 +110,14 @@ static int tc_shutdown(struct clock_event_device *d)
-
- __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
- __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- + return 0;
- +}
- +
- +static int tc_shutdown_clk_off(struct clock_event_device *d)
- +{
- + tc_shutdown(d);
- if (!clockevent_state_detached(d))
- - clk_disable(tcd->clk);
- + tc_clk_disable(d);
-
- return 0;
- }
- @@ -112,9 +130,9 @@ static int tc_set_oneshot(struct clock_event_device *d)
- if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
- tc_shutdown(d);
-
- - clk_enable(tcd->clk);
- + tc_clk_enable(d);
-
- - /* slow clock, count up to RC, then irq and stop */
- + /* count up to RC, then irq and stop */
- __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
- ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
- @@ -134,12 +152,12 @@ static int tc_set_periodic(struct clock_event_device *d)
- /* By not making the gentime core emulate periodic mode on top
- * of oneshot, we get lower overhead and improved accuracy.
- */
- - clk_enable(tcd->clk);
- + tc_clk_enable(d);
-
- - /* slow clock, count up to RC, then irq and restart */
- + /* count up to RC, then irq and restart */
- __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
- - __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
- + __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-
- /* Enable clock and interrupts on RC compare */
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
- @@ -166,9 +184,13 @@ static struct tc_clkevt_device clkevt = {
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- /* Should be lower than at91rm9200's system timer */
- +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- .rating = 125,
- +#else
- + .rating = 200,
- +#endif
- .set_next_event = tc_next_event,
- - .set_state_shutdown = tc_shutdown,
- + .set_state_shutdown = tc_shutdown_clk_off,
- .set_state_periodic = tc_set_periodic,
- .set_state_oneshot = tc_set_oneshot,
- },
- @@ -188,8 +210,9 @@ static irqreturn_t ch2_irq(int irq, void *handle)
- return IRQ_NONE;
- }
-
- -static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
- +static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
- {
- + unsigned divisor = atmel_tc_divisors[divisor_idx];
- int ret;
- struct clk *t2_clk = tc->clk[2];
- int irq = tc->irq[2];
- @@ -210,7 +233,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
- clkevt.regs = tc->regs;
- clkevt.clk = t2_clk;
-
- - timer_clock = clk32k_divisor_idx;
- + timer_clock = divisor_idx;
- + if (!divisor)
- + clkevt.freq = 32768;
- + else
- + clkevt.freq = clk_get_rate(t2_clk) / divisor;
-
- clkevt.clkevt.cpumask = cpumask_of(0);
-
- @@ -221,7 +248,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
- return ret;
- }
-
- - clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
- + clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
-
- return ret;
- }
- @@ -358,7 +385,11 @@ static int __init tcb_clksrc_init(void)
- goto err_disable_t1;
-
- /* channel 2: periodic and oneshot timer support */
- +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- ret = setup_clkevents(tc, clk32k_divisor_idx);
- +#else
- + ret = setup_clkevents(tc, best_divisor_idx);
- +#endif
- if (ret)
- goto err_unregister_clksrc;
-
- diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
- index 6555821bbdae..93288849b2bd 100644
- --- a/drivers/clocksource/timer-atmel-pit.c
- +++ b/drivers/clocksource/timer-atmel-pit.c
- @@ -46,6 +46,7 @@ struct pit_data {
- u32 cycle;
- u32 cnt;
- unsigned int irq;
- + bool irq_requested;
- struct clk *mck;
- };
-
- @@ -96,15 +97,29 @@ static int pit_clkevt_shutdown(struct clock_event_device *dev)
-
- /* disable irq, leaving the clocksource active */
- pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
- + if (data->irq_requested) {
- + free_irq(data->irq, data);
- + data->irq_requested = false;
- + }
- return 0;
- }
-
- +static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id);
- /*
- * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
- static int pit_clkevt_set_periodic(struct clock_event_device *dev)
- {
- struct pit_data *data = clkevt_to_pit_data(dev);
- + int ret;
- +
- + ret = request_irq(data->irq, at91sam926x_pit_interrupt,
- + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- + "at91_tick", data);
- + if (ret)
- + panic(pr_fmt("Unable to setup IRQ\n"));
- +
- + data->irq_requested = true;
-
- /* update clocksource counter */
- data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
- @@ -230,15 +245,6 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
- return ret;
- }
-
- - /* Set up irq handler */
- - ret = request_irq(data->irq, at91sam926x_pit_interrupt,
- - IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- - "at91_tick", data);
- - if (ret) {
- - pr_err("Unable to setup IRQ\n");
- - return ret;
- - }
- -
- /* Set up and register clockevents */
- data->clkevt.name = "pit";
- data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
- diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
- index e90ab5b63a90..9e124087c55f 100644
- --- a/drivers/clocksource/timer-atmel-st.c
- +++ b/drivers/clocksource/timer-atmel-st.c
- @@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_irq(void)
- last_crtr = read_CRTR();
- }
-
- +static int atmel_st_irq;
- +
- static int clkevt32k_shutdown(struct clock_event_device *evt)
- {
- clkdev32k_disable_and_flush_irq();
- irqmask = 0;
- regmap_write(regmap_st, AT91_ST_IER, irqmask);
- + free_irq(atmel_st_irq, regmap_st);
- return 0;
- }
-
- static int clkevt32k_set_oneshot(struct clock_event_device *dev)
- {
- + int ret;
- +
- clkdev32k_disable_and_flush_irq();
-
- + ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
- + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- + "at91_tick", regmap_st);
- + if (ret)
- + panic(pr_fmt("Unable to setup IRQ\n"));
- +
- /*
- * ALM for oneshot irqs, set by next_event()
- * before 32 seconds have passed.
- @@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
-
- static int clkevt32k_set_periodic(struct clock_event_device *dev)
- {
- + int ret;
- +
- clkdev32k_disable_and_flush_irq();
-
- + ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
- + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- + "at91_tick", regmap_st);
- + if (ret)
- + panic(pr_fmt("Unable to setup IRQ\n"));
- +
- /* PIT for periodic irqs; fixed rate of 1/HZ */
- irqmask = AT91_ST_PITS;
- regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
- @@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(struct device_node *node)
- {
- struct clk *sclk;
- unsigned int sclk_rate, val;
- - int irq, ret;
- + int ret;
-
- regmap_st = syscon_node_to_regmap(node);
- if (IS_ERR(regmap_st)) {
- @@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(struct device_node *node)
- regmap_read(regmap_st, AT91_ST_SR, &val);
-
- /* Get the interrupts property */
- - irq = irq_of_parse_and_map(node, 0);
- - if (!irq) {
- + atmel_st_irq = irq_of_parse_and_map(node, 0);
- + if (!atmel_st_irq) {
- pr_err("Unable to get IRQ from DT\n");
- return -EINVAL;
- }
-
- - /* Make IRQs happen for the system timer */
- - ret = request_irq(irq, at91rm9200_timer_interrupt,
- - IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- - "at91_tick", regmap_st);
- - if (ret) {
- - pr_err("Unable to setup IRQ\n");
- - return ret;
- - }
- -
- sclk = of_clk_get(node, 0);
- if (IS_ERR(sclk)) {
- pr_err("Unable to get slow clock\n");
- diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
- index a782ce87715c..19d265948526 100644
- --- a/drivers/connector/cn_proc.c
- +++ b/drivers/connector/cn_proc.c
- @@ -32,6 +32,7 @@
- #include <linux/pid_namespace.h>
-
- #include <linux/cn_proc.h>
- +#include <linux/locallock.h>
-
- /*
- * Size of a cn_msg followed by a proc_event structure. Since the
- @@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
-
- /* proc_event_counts is used as the sequence number of the netlink message */
- static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
- +static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock);
-
- static inline void send_msg(struct cn_msg *msg)
- {
- - preempt_disable();
- + local_lock(send_msg_lock);
-
- msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
- ((struct proc_event *)msg->data)->cpu = smp_processor_id();
- @@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg)
- */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
-
- - preempt_enable();
- + local_unlock(send_msg_lock);
- }
-
- void proc_fork_connector(struct task_struct *task)
- diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
- index adbd1de1cea5..1fac5074f2cf 100644
- --- a/drivers/cpufreq/Kconfig.x86
- +++ b/drivers/cpufreq/Kconfig.x86
- @@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
-
- config X86_POWERNOW_K8
- tristate "AMD Opteron/Athlon64 PowerNow!"
- - depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
- + depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
- help
- This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
- Support for K10 and newer processors is now in acpi-cpufreq.
- diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
- index 2117f172d7a2..96c15501b0c8 100644
- --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
- +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
- @@ -1489,7 +1489,9 @@ execbuf_submit(struct i915_execbuffer_params *params,
- if (ret)
- return ret;
-
- +#ifndef CONFIG_PREEMPT_RT_BASE
- trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
- +#endif
-
- i915_gem_execbuffer_move_to_active(vmas, params->request);
-
- diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
- index 755d78832a66..97fb03dc4971 100644
- --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
- +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
- @@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
- if (!mutex_is_locked(mutex))
- return false;
-
- -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- +#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
- return mutex->owner == task;
- #else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
- index 02908e37c228..05c0480576e1 100644
- --- a/drivers/gpu/drm/i915/i915_irq.c
- +++ b/drivers/gpu/drm/i915/i915_irq.c
- @@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
- + preempt_disable_rt();
-
- /* Get optional system timestamp before query. */
- if (stime)
- @@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- *etime = ktime_get();
-
- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- + preempt_enable_rt();
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
- index ce32303b3013..c0a53bf2e952 100644
- --- a/drivers/gpu/drm/i915/intel_display.c
- +++ b/drivers/gpu/drm/i915/intel_display.c
- @@ -12138,7 +12138,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_flip_work *work;
-
- - WARN_ON(!in_interrupt());
- + WARN_ON_NONRT(!in_interrupt());
-
- if (crtc == NULL)
- return;
- diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
- index 64f4e2e18594..aebf1e9eabcb 100644
- --- a/drivers/gpu/drm/i915/intel_sprite.c
- +++ b/drivers/gpu/drm/i915/intel_sprite.c
- @@ -35,6 +35,7 @@
- #include <drm/drm_rect.h>
- #include <drm/drm_atomic.h>
- #include <drm/drm_plane_helper.h>
- +#include <linux/locallock.h>
- #include "intel_drv.h"
- #include "intel_frontbuffer.h"
- #include <drm/i915_drm.h>
- @@ -65,6 +66,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- 1000 * adjusted_mode->crtc_htotal);
- }
-
- +static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);
- +
- /**
- * intel_pipe_update_start() - start update of a set of display registers
- * @crtc: the crtc of which the registers are going to be updated
- @@ -98,7 +101,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
- min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
- max = vblank_start - 1;
-
- - local_irq_disable();
- + local_lock_irq(pipe_update_lock);
-
- if (min <= 0 || max <= 0)
- return;
- @@ -128,11 +131,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
- break;
- }
-
- - local_irq_enable();
- + local_unlock_irq(pipe_update_lock);
-
- timeout = schedule_timeout(timeout);
-
- - local_irq_disable();
- + local_lock_irq(pipe_update_lock);
- }
-
- finish_wait(wq, &wait);
- @@ -202,7 +205,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
- crtc->base.state->event = NULL;
- }
-
- - local_irq_enable();
- + local_unlock_irq(pipe_update_lock);
-
- if (crtc->debug.start_vbl_count &&
- crtc->debug.start_vbl_count != end_vbl_count) {
- diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
- index 192b2d3a79cb..d5372a207326 100644
- --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
- +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
- @@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
- if (!mutex_is_locked(mutex))
- return false;
-
- -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- +#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
- return mutex->owner == task;
- #else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
- index cdb8cb568c15..b6d7fd964cbc 100644
- --- a/drivers/gpu/drm/radeon/radeon_display.c
- +++ b/drivers/gpu/drm/radeon/radeon_display.c
- @@ -1845,6 +1845,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- struct radeon_device *rdev = dev->dev_private;
-
- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
- + preempt_disable_rt();
-
- /* Get optional system timestamp before query. */
- if (stime)
- @@ -1937,6 +1938,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- *etime = ktime_get();
-
- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- + preempt_enable_rt();
-
- /* Decode into vertical and horizontal scanout position. */
- *vpos = position & 0x1fff;
- diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
- index 0276d2ef06ee..8868045eabde 100644
- --- a/drivers/hv/vmbus_drv.c
- +++ b/drivers/hv/vmbus_drv.c
- @@ -761,6 +761,8 @@ static void vmbus_isr(void)
- void *page_addr;
- struct hv_message *msg;
- union hv_synic_event_flags *event;
- + struct pt_regs *regs = get_irq_regs();
- + u64 ip = regs ? instruction_pointer(regs) : 0;
- bool handled = false;
-
- page_addr = hv_context.synic_event_page[cpu];
- @@ -808,7 +810,7 @@ static void vmbus_isr(void)
- tasklet_schedule(hv_context.msg_dpc[cpu]);
- }
-
- - add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
- + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip);
- }
-
-
- diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
- index 36f76e28a0bf..394f142f90c7 100644
- --- a/drivers/ide/alim15x3.c
- +++ b/drivers/ide/alim15x3.c
- @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
-
- isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
-
- if (m5229_revision < 0xC2) {
- /*
- @@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev)
- }
- pci_dev_put(north);
- pci_dev_put(isa_dev);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- return 0;
- }
-
- diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
- index 0ceae5cbd89a..c212e85d7f3e 100644
- --- a/drivers/ide/hpt366.c
- +++ b/drivers/ide/hpt366.c
- @@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
-
- dma_old = inb(base + 2);
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
-
- dma_new = dma_old;
- pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
- @@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif,
- if (dma_new != dma_old)
- outb(dma_new, base + 2);
-
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
- hwif->name, base, base + 7);
- diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c
- index 19763977568c..4169433faab5 100644
- --- a/drivers/ide/ide-io-std.c
- +++ b/drivers/ide/ide-io-std.c
- @@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- unsigned long uninitialized_var(flags);
-
- if ((io_32bit & 2) && !mmio) {
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- @@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- insl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- if (((len + 1) & 3) < 2)
- return;
- @@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- unsigned long uninitialized_var(flags);
-
- if ((io_32bit & 2) && !mmio) {
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- ata_vlb_sync(io_ports->nsect_addr);
- }
-
- @@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf,
- outsl(data_addr, buf, words);
-
- if ((io_32bit & 2) && !mmio)
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- if (((len + 1) & 3) < 2)
- return;
- diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
- index 669ea1e45795..e12e43e62245 100644
- --- a/drivers/ide/ide-io.c
- +++ b/drivers/ide/ide-io.c
- @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data)
- /* disable_irq_nosync ?? */
- disable_irq(hwif->irq);
- /* local CPU only, as if we were handling an interrupt */
- - local_irq_disable();
- + local_irq_disable_nort();
- if (hwif->polling) {
- startstop = handler(drive);
- } else if (drive_is_ready(drive)) {
- diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
- index 376f2dc410c5..f014dd1b73dc 100644
- --- a/drivers/ide/ide-iops.c
- +++ b/drivers/ide/ide-iops.c
- @@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
- if ((stat & ATA_BUSY) == 0)
- break;
-
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- *rstat = stat;
- return -EBUSY;
- }
- }
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
- /*
- * Allow status to settle, then read it again.
- diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
- index 0b63facd1d87..4ceba37afc0c 100644
- --- a/drivers/ide/ide-probe.c
- +++ b/drivers/ide/ide-probe.c
- @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id)
- int bswap = 1;
-
- /* local CPU only; some systems need this */
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- /* read 512 bytes of id info */
- hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- drive->dev_flags |= IDE_DFLAG_ID_READ;
- #ifdef DEBUG
- diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
- index a716693417a3..be0568c722d6 100644
- --- a/drivers/ide/ide-taskfile.c
- +++ b/drivers/ide/ide-taskfile.c
- @@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
-
- page_is_high = PageHighMem(page);
- if (page_is_high)
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
-
- buf = kmap_atomic(page) + offset;
-
- @@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
- kunmap_atomic(buf);
-
- if (page_is_high)
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- len -= nr_bytes;
- }
- @@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
- }
-
- if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- - local_irq_disable();
- + local_irq_disable_nort();
-
- ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-
- diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
- index fddff403d5d2..cca1bb4fbfe3 100644
- --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
- +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
- @@ -902,7 +902,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
-
- ipoib_dbg_mcast(priv, "restarting multicast task\n");
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- netif_addr_lock(dev);
- spin_lock(&priv->lock);
-
- @@ -984,7 +984,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
-
- spin_unlock(&priv->lock);
- netif_addr_unlock(dev);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- /*
- * make sure the in-flight joins have finished before we attempt
- diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
- index 4a2a9e370be7..e970d9afd179 100644
- --- a/drivers/input/gameport/gameport.c
- +++ b/drivers/input/gameport/gameport.c
- @@ -91,13 +91,13 @@ static int gameport_measure_speed(struct gameport *gameport)
- tx = ~0;
-
- for (i = 0; i < 50; i++) {
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- t1 = ktime_get_ns();
- for (t = 0; t < 50; t++)
- gameport_read(gameport);
- t2 = ktime_get_ns();
- t3 = ktime_get_ns();
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- udelay(i * 10);
- t = (t2 - t1) - (t3 - t2);
- if (t < tx)
- @@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport)
- tx = 1 << 30;
-
- for(i = 0; i < 50; i++) {
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- GET_TIME(t1);
- for (t = 0; t < 50; t++) gameport_read(gameport);
- GET_TIME(t2);
- GET_TIME(t3);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- udelay(i * 10);
- if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
- }
- @@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport)
- tx = 1 << 30;
-
- for(i = 0; i < 50; i++) {
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- t1 = rdtsc();
- for (t = 0; t < 50; t++) gameport_read(gameport);
- t2 = rdtsc();
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- udelay(i * 10);
- if (t2 - t1 < tx) tx = t2 - t1;
- }
- diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
- index 0c910a863581..3408e5dd1b93 100644
- --- a/drivers/iommu/amd_iommu.c
- +++ b/drivers/iommu/amd_iommu.c
- @@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_dev_data *dev_data,
- int ret;
-
- /*
- - * Must be called with IRQs disabled. Warn here to detect early
- - * when its not.
- + * Must be called with IRQs disabled on a non-RT kernel. Warn here to
- + * detect early when it's not.
- */
- - WARN_ON(!irqs_disabled());
- + WARN_ON_NONRT(!irqs_disabled());
-
- /* lock domain */
- spin_lock(&domain->lock);
- @@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu_dev_data *dev_data)
- struct protection_domain *domain;
-
- /*
- - * Must be called with IRQs disabled. Warn here to detect early
- - * when its not.
- + * Must be called with IRQs disabled on a non-RT kernel. Warn here to
- + * detect early when it's not.
- */
- - WARN_ON(!irqs_disabled());
- + WARN_ON_NONRT(!irqs_disabled());
-
- if (WARN_ON(!dev_data->domain))
- return;
- @@ -2283,7 +2283,7 @@ static void queue_add(struct dma_ops_domain *dma_dom,
- pages = __roundup_pow_of_two(pages);
- address >>= PAGE_SHIFT;
-
- - queue = get_cpu_ptr(&flush_queue);
- + queue = raw_cpu_ptr(&flush_queue);
- spin_lock_irqsave(&queue->lock, flags);
-
- if (queue->next == FLUSH_QUEUE_SIZE)
- @@ -2300,8 +2300,6 @@ static void queue_add(struct dma_ops_domain *dma_dom,
-
- if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
- mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
- -
- - put_cpu_ptr(&flush_queue);
- }
-
-
- diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
- index 88bbc8ccc5e3..8a1a8432a6bd 100644
- --- a/drivers/iommu/intel-iommu.c
- +++ b/drivers/iommu/intel-iommu.c
- @@ -479,7 +479,7 @@ struct deferred_flush_data {
- struct deferred_flush_table *tables;
- };
-
- -DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
- +static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
-
- /* bitmap for indexing intel_iommus */
- static int g_num_of_iommus;
- @@ -3721,10 +3721,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
- struct intel_iommu *iommu;
- struct deferred_flush_entry *entry;
- struct deferred_flush_data *flush_data;
- - unsigned int cpuid;
-
- - cpuid = get_cpu();
- - flush_data = per_cpu_ptr(&deferred_flush, cpuid);
- + flush_data = raw_cpu_ptr(&deferred_flush);
-
- /* Flush all CPUs' entries to avoid deferring too much. If
- * this becomes a bottleneck, can just flush us, and rely on
- @@ -3757,8 +3755,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
- }
- flush_data->size++;
- spin_unlock_irqrestore(&flush_data->lock, flags);
- -
- - put_cpu();
- }
-
- static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
- diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
- index e23001bfcfee..359d5d169ec0 100644
- --- a/drivers/iommu/iova.c
- +++ b/drivers/iommu/iova.c
- @@ -22,6 +22,7 @@
- #include <linux/slab.h>
- #include <linux/smp.h>
- #include <linux/bitops.h>
- +#include <linux/cpu.h>
-
- static bool iova_rcache_insert(struct iova_domain *iovad,
- unsigned long pfn,
- @@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
-
- /* Try replenishing IOVAs by flushing rcache. */
- flushed_rcache = true;
- - preempt_disable();
- for_each_online_cpu(cpu)
- free_cpu_cached_iovas(cpu, iovad);
- - preempt_enable();
- goto retry;
- }
-
- @@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
- bool can_insert = false;
- unsigned long flags;
-
- - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
- + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
- spin_lock_irqsave(&cpu_rcache->lock, flags);
-
- if (!iova_magazine_full(cpu_rcache->loaded)) {
- @@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
- iova_magazine_push(cpu_rcache->loaded, iova_pfn);
-
- spin_unlock_irqrestore(&cpu_rcache->lock, flags);
- - put_cpu_ptr(rcache->cpu_rcaches);
-
- if (mag_to_free) {
- iova_magazine_free_pfns(mag_to_free, iovad);
- @@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
- bool has_pfn = false;
- unsigned long flags;
-
- - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
- + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
- spin_lock_irqsave(&cpu_rcache->lock, flags);
-
- if (!iova_magazine_empty(cpu_rcache->loaded)) {
- @@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
- iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
-
- spin_unlock_irqrestore(&cpu_rcache->lock, flags);
- - put_cpu_ptr(rcache->cpu_rcaches);
-
- return iova_pfn;
- }
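
The iova.c hunks replace get_cpu_ptr()/put_cpu_ptr() with raw_cpu_ptr() and drop the preempt_disable() around the rcache flush. The per-CPU magazine is already serialized by cpu_rcache->lock with interrupts saved, so pinning the CPU adds nothing: choosing the current CPU's cache is a locality heuristic, and migrating between the lookup and the lock costs cache warmth, not correctness. The shape of the pattern, as a kernel-flavoured sketch:

struct pcpu_cache {
	spinlock_t lock;
	/* ... cached objects ... */
};

static void cache_op(struct pcpu_cache __percpu *caches)
{
	struct pcpu_cache *c = raw_cpu_ptr(caches); /* may migrate after this */
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	/* correct even after a migration: the lock protects c, not the CPU */
	spin_unlock_irqrestore(&c->lock, flags);
}
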
- diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
- index 3f9ddb9fafa7..09da5b6b44a1 100644
- --- a/drivers/leds/trigger/Kconfig
- +++ b/drivers/leds/trigger/Kconfig
- @@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT
-
- config LEDS_TRIGGER_CPU
- bool "LED CPU Trigger"
- - depends on LEDS_TRIGGERS
- + depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
- help
- This allows LEDs to be controlled by active CPUs. This shows
- the active CPUs across an array of LEDs so you can see which
- diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
- index 4d200883c505..98b64ed5cb81 100644
- --- a/drivers/md/bcache/Kconfig
- +++ b/drivers/md/bcache/Kconfig
- @@ -1,6 +1,7 @@
-
- config BCACHE
- tristate "Block device as cache"
- + depends on !PREEMPT_RT_FULL
- ---help---
- Allows a block device to be used as cache for other devices; uses
- a btree for indexing and the layout is optimized for SSDs.
- diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
- index ba7c4c685db3..834ec328f217 100644
- --- a/drivers/md/dm-rq.c
- +++ b/drivers/md/dm-rq.c
- @@ -842,7 +842,7 @@ static void dm_old_request_fn(struct request_queue *q)
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- kthread_queue_work(&md->kworker, &tio->work);
- - BUG_ON(!irqs_disabled());
- + BUG_ON_NONRT(!irqs_disabled());
- }
- }
-
- diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
- index 475a7a1bcfe0..8d2c9d70042e 100644
- --- a/drivers/md/raid5.c
- +++ b/drivers/md/raid5.c
- @@ -429,7 +429,7 @@ void raid5_release_stripe(struct stripe_head *sh)
- md_wakeup_thread(conf->mddev->thread);
- return;
- slow_path:
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
- if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
- INIT_LIST_HEAD(&list);
- @@ -438,7 +438,7 @@ void raid5_release_stripe(struct stripe_head *sh)
- spin_unlock(&conf->device_lock);
- release_inactive_stripe_list(conf, &list, hash);
- }
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
-
- static inline void remove_hash(struct stripe_head *sh)
- @@ -1937,8 +1937,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
- struct raid5_percpu *percpu;
- unsigned long cpu;
-
- - cpu = get_cpu();
- + cpu = get_cpu_light();
- percpu = per_cpu_ptr(conf->percpu, cpu);
- + spin_lock(&percpu->lock);
- if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
- ops_run_biofill(sh);
- overlap_clear++;
- @@ -1994,7 +1995,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&sh->raid_conf->wait_for_overlap);
- }
- - put_cpu();
- + spin_unlock(&percpu->lock);
- + put_cpu_light();
- }
-
- static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
- @@ -6410,6 +6412,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
- __func__, cpu);
- return -ENOMEM;
- }
- + spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
- return 0;
- }
-
- @@ -6420,7 +6423,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
- conf->percpu = alloc_percpu(struct raid5_percpu);
- if (!conf->percpu)
- return -ENOMEM;
- -
- err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
- if (!err) {
- conf->scribble_disks = max(conf->raid_disks,
- diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
- index 57ec49f0839e..0739604990b7 100644
- --- a/drivers/md/raid5.h
- +++ b/drivers/md/raid5.h
- @@ -504,6 +504,7 @@ struct r5conf {
- int recovery_disabled;
- /* per cpu variables */
- struct raid5_percpu {
- + spinlock_t lock; /* Protection for -RT */
- struct page *spare_page; /* Used when checking P/Q in raid6 */
- struct flex_array *scribble; /* space for constructing buffer
- * lists and performing address
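-
- get_cpu_light() is the RT counterpart of get_cpu(): on non-RT kernels
- it behaves like plain get_cpu() (preemption disabled), while on RT it
- only disables migration, leaving the task preemptible on its CPU.
- Migration-disable alone does not serialize access to the per-CPU
- scratch data, which is why the raid5 patch embeds a spinlock in
- struct raid5_percpu and holds it across the whole raid_run_ops()
- section. The combined pattern, as a sketch with assumed names:
-
-     struct pcpu_scratch {
-             spinlock_t lock;        /* the real serialization on RT */
-             void *buf;
-     };
-
-     static void use_scratch(struct pcpu_scratch __percpu *sc)
-     {
-             int cpu = get_cpu_light();      /* RT: migrate_disable(), still preemptible */
-             struct pcpu_scratch *s = per_cpu_ptr(sc, cpu);
-
-             spin_lock(&s->lock);    /* excludes tasks that preempt us here */
-             /* ... operate on s->buf ... */
-             spin_unlock(&s->lock);
-             put_cpu_light();
-     }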
- diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
- index 64971baf11fa..215e91e36198 100644
- --- a/drivers/misc/Kconfig
- +++ b/drivers/misc/Kconfig
- @@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
- config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
- + default y if PREEMPT_RT_FULL
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
- @@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC
- are combined to make a single 32-bit timer.
-
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
- - may be used as a clock event device supporting oneshot mode
- - (delays of up to two seconds) based on the 32 KiHz clock.
- + may be used as a clock event device supporting oneshot mode.
-
- config ATMEL_TCB_CLKSRC_BLOCK
- int
- @@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
- TC can be used for other purposes, such as PWM generation and
- interval timing.
-
- +config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- + bool "TC Block use 32 KiHz clock"
- + depends on ATMEL_TCB_CLKSRC
- + default y if !PREEMPT_RT_FULL
- + help
- + Select this to use 32 KiHz base clock rate as TC block clock
- + source for clock events.
- +
- +
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
- diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
- index df990bb8c873..1a162709a85e 100644
- --- a/drivers/mmc/host/mmci.c
- +++ b/drivers/mmc/host/mmci.c
- @@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
- struct sg_mapping_iter *sg_miter = &host->sg_miter;
- struct variant_data *variant = host->variant;
- void __iomem *base = host->base;
- - unsigned long flags;
- u32 status;
-
- status = readl(base + MMCISTATUS);
-
- dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
-
- - local_irq_save(flags);
- -
- do {
- unsigned int remain, len;
- char *buffer;
- @@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
-
- sg_miter_stop(sg_miter);
-
- - local_irq_restore(flags);
- -
- /*
- * If we have less than the fifo 'half-full' threshold to transfer,
- * trigger a PIO interrupt as soon as any data is available.
- diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
- index 9133e7926da5..63afb921ed40 100644
- --- a/drivers/net/ethernet/3com/3c59x.c
- +++ b/drivers/net/ethernet/3com/3c59x.c
- @@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev)
- {
- struct vortex_private *vp = netdev_priv(dev);
- unsigned long flags;
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
- #endif
-
- @@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net_device *dev)
- * Block interrupts because vortex_interrupt does a bare spin_lock()
- */
- unsigned long flags;
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- if (vp->full_bus_master_tx)
- boomerang_interrupt(dev->irq, dev);
- else
- vortex_interrupt(dev->irq, dev);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
- }
-
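-
- The *_nort() variants really disable interrupts on non-RT kernels but
- become (almost) no-ops on PREEMPT_RT, where the interrupt handlers
- called here run in thread context and take sleeping locks, so hard
- interrupt disabling around them is neither needed nor permitted. A
- plausible shape of the helpers, as defined elsewhere in this series:
-
-     #ifdef CONFIG_PREEMPT_RT_FULL
-     # define local_irq_save_nort(flags)     local_save_flags(flags)
-     # define local_irq_restore_nort(flags)  (void)(flags)
-     #else
-     # define local_irq_save_nort(flags)     local_irq_save(flags)
-     # define local_irq_restore_nort(flags)  local_irq_restore(flags)
-     #endif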
- diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
- index da4c2d8a4173..1420dfb56bac 100644
- --- a/drivers/net/ethernet/realtek/8139too.c
- +++ b/drivers/net/ethernet/realtek/8139too.c
- @@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
- struct rtl8139_private *tp = netdev_priv(dev);
- const int irq = tp->pci_dev->irq;
-
- - disable_irq(irq);
- + disable_irq_nosync(irq);
- rtl8139_interrupt(irq, dev);
- enable_irq(irq);
- }
- diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
- index bca6935a94db..d7a35ee34d03 100644
- --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
- +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
- @@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
- while (!ctx->done.done && msecs--)
- udelay(1000);
- } else {
- - wait_event_interruptible(ctx->done.wait,
- + swait_event_interruptible(ctx->done.wait,
- ctx->done.done);
- }
- break;
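-
- On RT the completion machinery is backed by simple wait queues
- (swait), whose wakeup path is safe from atomic context and does a
- bounded amount of work per wakeup. Code that open-codes a wait on a
- completion's internal queue therefore has to switch to the swait_*
- helpers; the condition and semantics are unchanged. Minimal sketch,
- assuming the RT tree where struct completion embeds an swait queue
- named 'wait' (as the hunk above relies on):
-
-     #include <linux/completion.h>
-
-     static int wait_for_ctx(struct completion *done)
-     {
-             /* was: wait_event_interruptible(done->wait, done->done) */
-             return swait_event_interruptible(done->wait, done->done);
-     }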
- diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
- index bedce3453dd3..faf038978650 100644
- --- a/drivers/pinctrl/qcom/pinctrl-msm.c
- +++ b/drivers/pinctrl/qcom/pinctrl-msm.c
- @@ -61,7 +61,7 @@ struct msm_pinctrl {
- struct notifier_block restart_nb;
- int irq;
-
- - spinlock_t lock;
- + raw_spinlock_t lock;
-
- DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
- DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
- @@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
- if (WARN_ON(i == g->nfuncs))
- return -EINVAL;
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~mask;
- val |= i << g->mux_bit;
- writel(val, pctrl->regs + g->ctl_reg);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
- @@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
- break;
- case PIN_CONFIG_OUTPUT:
- /* set output value */
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
- if (arg)
- val |= BIT(g->out_bit);
- else
- val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- /* enable output */
- arg = 1;
- @@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
- return -EINVAL;
- }
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~(mask << bit);
- val |= arg << bit;
- writel(val, pctrl->regs + g->ctl_reg);
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- return 0;
- @@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-
- g = &pctrl->soc->groups[offset];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
- @@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
-
- g = &pctrl->soc->groups[offset];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->io_reg);
- if (value)
- @@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
- val |= BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
- @@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-
- g = &pctrl->soc->groups[offset];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->io_reg);
- if (value)
- @@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
- val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- #ifdef CONFIG_DEBUG_FS
- @@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
-
- g = &pctrl->soc->groups[d->hwirq];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->intr_cfg_reg);
- val &= ~BIT(g->intr_enable_bit);
- @@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
-
- clear_bit(d->hwirq, pctrl->enabled_irqs);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- static void msm_gpio_irq_unmask(struct irq_data *d)
- @@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
-
- g = &pctrl->soc->groups[d->hwirq];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->intr_cfg_reg);
- val |= BIT(g->intr_enable_bit);
- @@ -600,7 +600,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
-
- set_bit(d->hwirq, pctrl->enabled_irqs);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- static void msm_gpio_irq_ack(struct irq_data *d)
- @@ -613,7 +613,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
-
- g = &pctrl->soc->groups[d->hwirq];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->intr_status_reg);
- if (g->intr_ack_high)
- @@ -625,7 +625,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
- if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- msm_gpio_update_dual_edge_pos(pctrl, g, d);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
- @@ -638,7 +638,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-
- g = &pctrl->soc->groups[d->hwirq];
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- /*
- * For hw without possibility of detecting both edges
- @@ -712,7 +712,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
- if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- msm_gpio_update_dual_edge_pos(pctrl, g, d);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- irq_set_handler_locked(d, handle_level_irq);
- @@ -728,11 +728,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
- struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned long flags;
-
- - spin_lock_irqsave(&pctrl->lock, flags);
- + raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- irq_set_irq_wake(pctrl->irq, on);
-
- - spin_unlock_irqrestore(&pctrl->lock, flags);
- + raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
- @@ -878,7 +878,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
- pctrl->soc = soc_data;
- pctrl->chip = msm_gpio_template;
-
- - spin_lock_init(&pctrl->lock);
- + raw_spin_lock_init(&pctrl->lock);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
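-
- The pinctrl-msm lock must become a raw_spinlock_t because it is taken
- from the irqchip callbacks (mask/unmask/ack/set_type), which still run
- in genuinely atomic context on RT; an ordinary spinlock_t is a
- sleeping lock there. The critical sections are short, bounded MMIO
- updates, so a raw lock is acceptable. The rule of thumb as a sketch:
-
-     #include <linux/irq.h>
-     #include <linux/spinlock.h>
-
-     /* Locks used from hard-irq / irqchip paths stay raw on RT. */
-     static DEFINE_RAW_SPINLOCK(chip_lock);
-
-     static void chip_irq_mask(struct irq_data *d)
-     {
-             unsigned long flags;
-
-             raw_spin_lock_irqsave(&chip_lock, flags);   /* hard IRQs off, even on RT */
-             /* short, bounded register update */
-             raw_spin_unlock_irqrestore(&chip_lock, flags);
-     }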
- diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
- index 9bd41a35a78a..8e2d436c2e3f 100644
- --- a/drivers/scsi/fcoe/fcoe.c
- +++ b/drivers/scsi/fcoe/fcoe.c
- @@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
- static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
- {
- struct fcoe_percpu_s *fps;
- - int rc;
- + int rc, cpu = get_cpu_light();
-
- - fps = &get_cpu_var(fcoe_percpu);
- + fps = &per_cpu(fcoe_percpu, cpu);
- rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
- - put_cpu_var(fcoe_percpu);
- + put_cpu_light();
-
- return rc;
- }
- @@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
- return 0;
- }
-
- - stats = per_cpu_ptr(lport->stats, get_cpu());
- + stats = per_cpu_ptr(lport->stats, get_cpu_light());
- stats->InvalidCRCCount++;
- if (stats->InvalidCRCCount < 5)
- printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
- - put_cpu();
- + put_cpu_light();
- return -EINVAL;
- }
-
- @@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
- */
- hp = (struct fcoe_hdr *) skb_network_header(skb);
-
- - stats = per_cpu_ptr(lport->stats, get_cpu());
- + stats = per_cpu_ptr(lport->stats, get_cpu_light());
- if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
- if (stats->ErrorFrames < 5)
- printk(KERN_WARNING "fcoe: FCoE version "
- @@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
- goto drop;
-
- if (!fcoe_filter_frames(lport, fp)) {
- - put_cpu();
- + put_cpu_light();
- fc_exch_recv(lport, fp);
- return;
- }
- drop:
- stats->ErrorFrames++;
- - put_cpu();
- + put_cpu_light();
- kfree_skb(skb);
- }
-
- diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
- index dcf36537a767..1a1f2e46452c 100644
- --- a/drivers/scsi/fcoe/fcoe_ctlr.c
- +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
- @@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
-
- INIT_LIST_HEAD(&del_list);
-
- - stats = per_cpu_ptr(fip->lp->stats, get_cpu());
- + stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
-
- list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
- deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
- @@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
- sel_time = fcf->time;
- }
- }
- - put_cpu();
- + put_cpu_light();
-
- list_for_each_entry_safe(fcf, next, &del_list, list) {
- /* Removes fcf from current list */
- diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
- index 16ca31ad5ec0..c3987347e762 100644
- --- a/drivers/scsi/libfc/fc_exch.c
- +++ b/drivers/scsi/libfc/fc_exch.c
- @@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
- }
- memset(ep, 0, sizeof(*ep));
-
- - cpu = get_cpu();
- + cpu = get_cpu_light();
- pool = per_cpu_ptr(mp->pool, cpu);
- spin_lock_bh(&pool->lock);
- - put_cpu();
- + put_cpu_light();
-
- /* peek cache of free slot */
- if (pool->left != FC_XID_UNKNOWN) {
- diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
- index 87f5e694dbed..23c0a50fb6aa 100644
- --- a/drivers/scsi/libsas/sas_ata.c
- +++ b/drivers/scsi/libsas/sas_ata.c
- @@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
- /* TODO: audit callers to ensure they are ready for qc_issue to
- * unconditionally re-enable interrupts
- */
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- spin_unlock(ap->lock);
-
- /* If the device fell off, no sense in issuing commands */
- @@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
-
- out:
- spin_lock(ap->lock);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- return ret;
- }
-
- diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
- index edc48f3b8230..ee5c6f9dfb6f 100644
- --- a/drivers/scsi/qla2xxx/qla_inline.h
- +++ b/drivers/scsi/qla2xxx/qla_inline.h
- @@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp)
- {
- unsigned long flags;
- struct qla_hw_data *ha = rsp->hw;
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- if (IS_P3P_TYPE(ha))
- qla82xx_poll(0, rsp);
- else
- ha->isp_ops->intr_handler(0, rsp);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
-
- static inline uint8_t *
- diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
- index bddaabb288d4..8de0ec4222fe 100644
- --- a/drivers/scsi/qla2xxx/qla_isr.c
- +++ b/drivers/scsi/qla2xxx/qla_isr.c
- @@ -3129,7 +3129,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
- * kref_put().
- */
- kref_get(&qentry->irq_notify.kref);
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + swork_queue(&qentry->irq_notify.swork);
- +#else
- schedule_work(&qentry->irq_notify.work);
- +#endif
- }
-
- /*
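-
- swork is this series' "simple work queue": a single kernel thread to
- which events can be queued from contexts where schedule_work() is
- problematic on RT. The thermal patch below shows the full lifecycle;
- the minimal usage pattern, using the API exactly as it appears in this
- series, is:
-
-     #include <linux/swork.h>
-
-     static void my_handler(struct swork_event *ev)
-     {
-             /* runs in the swork kernel thread, fully preemptible */
-     }
-
-     static struct swork_event my_ev;
-
-     static int my_init(void)
-     {
-             int err = swork_get();  /* take a reference on the worker thread */
-
-             if (err)
-                     return err;
-             INIT_SWORK(&my_ev, my_handler);
-             return 0;
-     }
-
-     static void my_trigger(void)    /* e.g. from an interrupt notifier */
-     {
-             swork_queue(&my_ev);
-     }
-
-     static void my_exit(void)
-     {
-             swork_put();            /* drop the worker thread reference */
-     }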
- diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
- index 95f4c1bcdb4c..0be934799bff 100644
- --- a/drivers/thermal/x86_pkg_temp_thermal.c
- +++ b/drivers/thermal/x86_pkg_temp_thermal.c
- @@ -29,6 +29,7 @@
- #include <linux/pm.h>
- #include <linux/thermal.h>
- #include <linux/debugfs.h>
- +#include <linux/swork.h>
- #include <asm/cpu_device_id.h>
- #include <asm/mce.h>
-
- @@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
- }
- }
-
- -static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
- +static void platform_thermal_notify_work(struct swork_event *event)
- {
- unsigned long flags;
- int cpu = smp_processor_id();
- @@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
- pkg_work_scheduled[phy_id]) {
- disable_pkg_thres_interrupt();
- spin_unlock_irqrestore(&pkg_work_lock, flags);
- - return -EINVAL;
- + return;
- }
- pkg_work_scheduled[phy_id] = 1;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
- @@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
- schedule_delayed_work_on(cpu,
- &per_cpu(pkg_temp_thermal_threshold_work, cpu),
- msecs_to_jiffies(notify_delay_ms));
- +}
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static struct swork_event notify_work;
- +
- +static int thermal_notify_work_init(void)
- +{
- + int err;
- +
- + err = swork_get();
- + if (err)
- + return err;
- +
- + INIT_SWORK(&notify_work, platform_thermal_notify_work);
- return 0;
- }
-
- +static void thermal_notify_work_cleanup(void)
- +{
- + swork_put();
- +}
- +
- +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
- +{
- + swork_queue(&notify_work);
- + return 0;
- +}
- +
- +#else /* !CONFIG_PREEMPT_RT_FULL */
- +
- +static int thermal_notify_work_init(void) { return 0; }
- +
- +static void thermal_notify_work_cleanup(void) { }
- +
- +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
- +{
- + platform_thermal_notify_work(NULL);
- +
- + return 0;
- +}
- +#endif /* CONFIG_PREEMPT_RT_FULL */
- +
- static int find_siblings_cpu(int cpu)
- {
- int i;
- @@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(void)
- if (!x86_match_cpu(pkg_temp_thermal_ids))
- return -ENODEV;
-
- + if (thermal_notify_work_init())
- + return -ENODEV;
- +
- spin_lock_init(&pkg_work_lock);
- platform_thermal_package_notify =
- pkg_temp_thermal_platform_thermal_notify;
- @@ -609,7 +652,7 @@ static int __init pkg_temp_thermal_init(void)
- kfree(pkg_work_scheduled);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
- -
- + thermal_notify_work_cleanup();
- return -ENODEV;
- }
-
- @@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit(void)
- mutex_unlock(&phy_dev_list_mutex);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
- + thermal_notify_work_cleanup();
- for_each_online_cpu(i)
- cancel_delayed_work_sync(
- &per_cpu(pkg_temp_thermal_threshold_work, i));
- diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
- index e8819aa20415..dd7f9bf45d6c 100644
- --- a/drivers/tty/serial/8250/8250_core.c
- +++ b/drivers/tty/serial/8250/8250_core.c
- @@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg;
-
- static unsigned int skip_txen_test; /* force skip of txen test at init time */
-
- -#define PASS_LIMIT 512
- +/*
- + * On -rt we can legitimately have more delays, so
- + * don't drop work spuriously and spam the
- + * syslog:
- + */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define PASS_LIMIT 1000000
- +#else
- +# define PASS_LIMIT 512
- +#endif
-
- #include <asm/serial.h>
- /*
- diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
- index f6e4373a8850..4620b51b0e7c 100644
- --- a/drivers/tty/serial/8250/8250_port.c
- +++ b/drivers/tty/serial/8250/8250_port.c
- @@ -35,6 +35,7 @@
- #include <linux/nmi.h>
- #include <linux/mutex.h>
- #include <linux/slab.h>
- +#include <linux/kdb.h>
- #include <linux/uaccess.h>
- #include <linux/pm_runtime.h>
- #include <linux/timer.h>
- @@ -3143,9 +3144,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
-
- serial8250_rpm_get(up);
-
- - if (port->sysrq)
- + if (port->sysrq || oops_in_progress)
- locked = 0;
- - else if (oops_in_progress)
- + else if (in_kdb_printk())
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
- diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
- index e2c33b9528d8..53af53c43e8c 100644
- --- a/drivers/tty/serial/amba-pl011.c
- +++ b/drivers/tty/serial/amba-pl011.c
- @@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
-
- clk_enable(uap->clk);
-
- - local_irq_save(flags);
- + /*
- + * local_irq_save(flags);
- + *
- + * This local_irq_save() is nonsense. If we come in via sysrq
- + * handling then interrupts are already disabled. Aside from
- + * that, the port.sysrq check is racy on SMP regardless.
- + */
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- - locked = spin_trylock(&uap->port.lock);
- + locked = spin_trylock_irqsave(&uap->port.lock, flags);
- else
- - spin_lock(&uap->port.lock);
- + spin_lock_irqsave(&uap->port.lock, flags);
-
- /*
- * First save the CR then disable the interrupts
- @@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
- pl011_write(old_cr, uap, REG_CR);
-
- if (locked)
- - spin_unlock(&uap->port.lock);
- - local_irq_restore(flags);
- + spin_unlock_irqrestore(&uap->port.lock, flags);
-
- clk_disable(uap->clk);
- }
- diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
- index 472ba3c813c1..e654cb421fb7 100644
- --- a/drivers/tty/serial/omap-serial.c
- +++ b/drivers/tty/serial/omap-serial.c
- @@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console *co, const char *s,
-
- pm_runtime_get_sync(up->dev);
-
- - local_irq_save(flags);
- - if (up->port.sysrq)
- - locked = 0;
- - else if (oops_in_progress)
- - locked = spin_trylock(&up->port.lock);
- + if (up->port.sysrq || oops_in_progress)
- + locked = spin_trylock_irqsave(&up->port.lock, flags);
- else
- - spin_lock(&up->port.lock);
- + spin_lock_irqsave(&up->port.lock, flags);
-
- /*
- * First save the IER then disable the interrupts
- @@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console *co, const char *s,
- pm_runtime_mark_last_busy(up->dev);
- pm_runtime_put_autosuspend(up->dev);
- if (locked)
- - spin_unlock(&up->port.lock);
- - local_irq_restore(flags);
- + spin_unlock_irqrestore(&up->port.lock, flags);
- }
-
- static int __init
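-
- The console patches above all implement the same rule: take the port
- lock with the irqsave variant directly instead of a separate
- local_irq_save(), and only trylock when sysrq owns the port or an oops
- is in progress, since the console can then be re-entered from a
- context that already holds the lock. Distilled into a sketch, assuming
- a generic uart_port:
-
-     #include <linux/serial_core.h>
-
-     static void con_write(struct uart_port *port, bool sysrq_or_oops)
-     {
-             unsigned long flags;
-             int locked = 1;
-
-             if (sysrq_or_oops)      /* never deadlock while crashing */
-                     locked = spin_trylock_irqsave(&port->lock, flags);
-             else
-                     spin_lock_irqsave(&port->lock, flags);
-
-             /* ... emit characters ... */
-
-             if (locked)
-                     spin_unlock_irqrestore(&port->lock, flags);
-     }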
- diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
- index fcc7aa248ce7..fb2c38d875f9 100644
- --- a/drivers/usb/core/hcd.c
- +++ b/drivers/usb/core/hcd.c
- @@ -1764,9 +1764,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
- * and no one may trigger the above deadlock situation when
- * running complete() in tasklet.
- */
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- urb->complete(urb);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
-
- usb_anchor_resume_wakeups(anchor);
- atomic_dec(&urb->use_count);
- diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
- index 7b107e43b1c4..f1e8534a1748 100644
- --- a/drivers/usb/gadget/function/f_fs.c
- +++ b/drivers/usb/gadget/function/f_fs.c
- @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
- pr_info("%s(): freeing\n", __func__);
- ffs_data_clear(ffs);
- BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
- - waitqueue_active(&ffs->ep0req_completion.wait));
- + swait_active(&ffs->ep0req_completion.wait));
- kfree(ffs->dev_name);
- kfree(ffs);
- }
- diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
- index b8534d3f8bb0..8fcaf02e21b0 100644
- --- a/drivers/usb/gadget/legacy/inode.c
- +++ b/drivers/usb/gadget/legacy/inode.c
- @@ -347,7 +347,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
- spin_unlock_irq (&epdata->dev->lock);
-
- if (likely (value == 0)) {
- - value = wait_event_interruptible (done.wait, done.done);
- + value = swait_event_interruptible (done.wait, done.done);
- if (value != 0) {
- spin_lock_irq (&epdata->dev->lock);
- if (likely (epdata->ep != NULL)) {
- @@ -356,7 +356,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
- usb_ep_dequeue (epdata->ep, epdata->req);
- spin_unlock_irq (&epdata->dev->lock);
-
- - wait_event (done.wait, done.done);
- + swait_event (done.wait, done.done);
- if (epdata->status == -ECONNRESET)
- epdata->status = -EINTR;
- } else {
- diff --git a/fs/aio.c b/fs/aio.c
- index 0fcb49ad67d4..211ebc21e4db 100644
- --- a/fs/aio.c
- +++ b/fs/aio.c
- @@ -40,6 +40,7 @@
- #include <linux/ramfs.h>
- #include <linux/percpu-refcount.h>
- #include <linux/mount.h>
- +#include <linux/swork.h>
-
- #include <asm/kmap_types.h>
- #include <asm/uaccess.h>
- @@ -115,7 +116,7 @@ struct kioctx {
- struct page **ring_pages;
- long nr_pages;
-
- - struct work_struct free_work;
- + struct swork_event free_work;
-
- /*
- * signals when all in-flight requests are done
- @@ -258,6 +259,7 @@ static int __init aio_setup(void)
- .mount = aio_mount,
- .kill_sb = kill_anon_super,
- };
- + BUG_ON(swork_get());
- aio_mnt = kern_mount(&aio_fs);
- if (IS_ERR(aio_mnt))
- panic("Failed to create aio fs mount.");
- @@ -581,9 +583,9 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
- return cancel(&kiocb->common);
- }
-
- -static void free_ioctx(struct work_struct *work)
- +static void free_ioctx(struct swork_event *sev)
- {
- - struct kioctx *ctx = container_of(work, struct kioctx, free_work);
- + struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
-
- pr_debug("freeing %p\n", ctx);
-
- @@ -602,8 +604,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
- if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
- complete(&ctx->rq_wait->comp);
-
- - INIT_WORK(&ctx->free_work, free_ioctx);
- - schedule_work(&ctx->free_work);
- + INIT_SWORK(&ctx->free_work, free_ioctx);
- + swork_queue(&ctx->free_work);
- }
-
- /*
- @@ -611,9 +613,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
- * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
- * now it's safe to cancel any that need to be.
- */
- -static void free_ioctx_users(struct percpu_ref *ref)
- +static void free_ioctx_users_work(struct swork_event *sev)
- {
- - struct kioctx *ctx = container_of(ref, struct kioctx, users);
- + struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
- struct aio_kiocb *req;
-
- spin_lock_irq(&ctx->ctx_lock);
- @@ -632,6 +634,14 @@ static void free_ioctx_users(struct percpu_ref *ref)
- percpu_ref_put(&ctx->reqs);
- }
-
- +static void free_ioctx_users(struct percpu_ref *ref)
- +{
- + struct kioctx *ctx = container_of(ref, struct kioctx, users);
- +
- + INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
- + swork_queue(&ctx->free_work);
- +}
- +
- static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
- {
- unsigned i, new_nr;
- diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
- index a1fba4285277..3796769b4cd1 100644
- --- a/fs/autofs4/autofs_i.h
- +++ b/fs/autofs4/autofs_i.h
- @@ -31,6 +31,7 @@
- #include <linux/sched.h>
- #include <linux/mount.h>
- #include <linux/namei.h>
- +#include <linux/delay.h>
- #include <asm/current.h>
- #include <linux/uaccess.h>
-
- diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
- index d8e6d421c27f..2e689ab1306b 100644
- --- a/fs/autofs4/expire.c
- +++ b/fs/autofs4/expire.c
- @@ -148,7 +148,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
- parent = p->d_parent;
- if (!spin_trylock(&parent->d_lock)) {
- spin_unlock(&p->d_lock);
- - cpu_relax();
- + cpu_chill();
- goto relock;
- }
- spin_unlock(&p->d_lock);
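-
- cpu_chill() replaces cpu_relax() in trylock-retry loops. Busy-spinning
- can livelock on RT: the spinning task may have a higher priority than
- the lock holder, which then never gets to run and release the lock.
- cpu_chill() instead sleeps briefly so the holder can make progress,
- which is also why these files gain #include <linux/delay.h>. A
- simplified sketch of the RT side (the real implementation also
- handles freezer flags):
-
-     #ifdef CONFIG_PREEMPT_RT_FULL
-     /* Sleep ~1ms in the hope that whoever holds the lock lets it go. */
-     void cpu_chill(void)
-     {
-             struct timespec tu = { .tv_nsec = NSEC_PER_MSEC };
-
-             hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
-     }
-     #else
-     # define cpu_chill()    cpu_relax()
-     #endif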
- diff --git a/fs/buffer.c b/fs/buffer.c
- index 5d8f496d624e..48074bd91ea3 100644
- --- a/fs/buffer.c
- +++ b/fs/buffer.c
- @@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
- * decide that the page is now completely done.
- */
- first = page_buffers(page);
- - local_irq_save(flags);
- - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
- + flags = bh_uptodate_lock_irqsave(first);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
- @@ -315,8 +314,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
- - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- - local_irq_restore(flags);
- + bh_uptodate_unlock_irqrestore(first, flags);
-
- /*
- * If none of the buffers had errors and they are all
- @@ -328,9 +326,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
- return;
-
- still_busy:
- - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- - local_irq_restore(flags);
- - return;
- + bh_uptodate_unlock_irqrestore(first, flags);
- }
-
- /*
- @@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
- }
-
- first = page_buffers(page);
- - local_irq_save(flags);
- - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
- + flags = bh_uptodate_lock_irqsave(first);
-
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- @@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
- }
- tmp = tmp->b_this_page;
- }
- - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- - local_irq_restore(flags);
- + bh_uptodate_unlock_irqrestore(first, flags);
- end_page_writeback(page);
- return;
-
- still_busy:
- - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- - local_irq_restore(flags);
- - return;
- + bh_uptodate_unlock_irqrestore(first, flags);
- }
- EXPORT_SYMBOL(end_buffer_async_write);
-
- @@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
- struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
- if (ret) {
- INIT_LIST_HEAD(&ret->b_assoc_buffers);
- + buffer_head_init_locks(ret);
- preempt_disable();
- __this_cpu_inc(bh_accounting.nr);
- recalc_bh_state();
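-
- bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() hide the
- BH_Uptodate_Lock bit-spinlock behind an interface RT can substitute:
- a bit-spinlock has no lock structure to convert, so on RT the
- buffer_head grows a dedicated spinlock, initialized by
- buffer_head_init_locks() in alloc_buffer_head() above. A plausible
- shape of the helpers this series adds to linux/buffer_head.h (field
- name assumed):
-
-     static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-     {
-             unsigned long flags;
-
-     #ifndef CONFIG_PREEMPT_RT_BASE
-             local_irq_save(flags);
-             bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-     #else
-             spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-     #endif
-             return flags;
-     }
-
-     static inline void bh_uptodate_unlock_irqrestore(struct buffer_head *bh,
-                                                      unsigned long flags)
-     {
-     #ifndef CONFIG_PREEMPT_RT_BASE
-             bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
-             local_irq_restore(flags);
-     #else
-             spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-     #endif
-     }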
- diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
- index a27fc8791551..791aecb7c1ac 100644
- --- a/fs/cifs/readdir.c
- +++ b/fs/cifs/readdir.c
- @@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
- struct inode *inode;
- struct super_block *sb = parent->d_sb;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
-
- cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
-
- diff --git a/fs/dcache.c b/fs/dcache.c
- index 67957f5b325c..f0719b2f1be5 100644
- --- a/fs/dcache.c
- +++ b/fs/dcache.c
- @@ -19,6 +19,7 @@
- #include <linux/mm.h>
- #include <linux/fs.h>
- #include <linux/fsnotify.h>
- +#include <linux/delay.h>
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/hash.h>
- @@ -777,6 +778,8 @@ static inline bool fast_dput(struct dentry *dentry)
- */
- void dput(struct dentry *dentry)
- {
- + struct dentry *parent;
- +
- if (unlikely(!dentry))
- return;
-
- @@ -815,9 +818,18 @@ void dput(struct dentry *dentry)
- return;
-
- kill_it:
- - dentry = dentry_kill(dentry);
- - if (dentry) {
- - cond_resched();
- + parent = dentry_kill(dentry);
- + if (parent) {
- + int r;
- +
- + if (parent == dentry) {
- + /* the task with the highest priority won't schedule */
- + r = cond_resched();
- + if (!r)
- + cpu_chill();
- + } else {
- + dentry = parent;
- + }
- goto repeat;
- }
- }
- @@ -2352,7 +2364,7 @@ void d_delete(struct dentry * dentry)
- if (dentry->d_lockref.count == 1) {
- if (!spin_trylock(&inode->i_lock)) {
- spin_unlock(&dentry->d_lock);
- - cpu_relax();
- + cpu_chill();
- goto again;
- }
- dentry->d_flags &= ~DCACHE_CANT_MOUNT;
- @@ -2397,9 +2409,10 @@ EXPORT_SYMBOL(d_rehash);
- static inline unsigned start_dir_add(struct inode *dir)
- {
-
- + preempt_disable_rt();
- for (;;) {
- - unsigned n = dir->i_dir_seq;
- - if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
- + unsigned n = dir->__i_dir_seq;
- + if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n)
- return n;
- cpu_relax();
- }
- @@ -2407,26 +2420,30 @@ static inline unsigned start_dir_add(struct inode *dir)
-
- static inline void end_dir_add(struct inode *dir, unsigned n)
- {
- - smp_store_release(&dir->i_dir_seq, n + 2);
- + smp_store_release(&dir->__i_dir_seq, n + 2);
- + preempt_enable_rt();
- }
-
- static void d_wait_lookup(struct dentry *dentry)
- {
- - if (d_in_lookup(dentry)) {
- - DECLARE_WAITQUEUE(wait, current);
- - add_wait_queue(dentry->d_wait, &wait);
- - do {
- - set_current_state(TASK_UNINTERRUPTIBLE);
- - spin_unlock(&dentry->d_lock);
- - schedule();
- - spin_lock(&dentry->d_lock);
- - } while (d_in_lookup(dentry));
- - }
- + struct swait_queue __wait;
- +
- + if (!d_in_lookup(dentry))
- + return;
- +
- + INIT_LIST_HEAD(&__wait.task_list);
- + do {
- + prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
- + spin_unlock(&dentry->d_lock);
- + schedule();
- + spin_lock(&dentry->d_lock);
- + } while (d_in_lookup(dentry));
- + finish_swait(dentry->d_wait, &__wait);
- }
-
- struct dentry *d_alloc_parallel(struct dentry *parent,
- const struct qstr *name,
- - wait_queue_head_t *wq)
- + struct swait_queue_head *wq)
- {
- unsigned int hash = name->hash;
- struct hlist_bl_head *b = in_lookup_hash(parent, hash);
- @@ -2440,7 +2457,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
-
- retry:
- rcu_read_lock();
- - seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
- + seq = smp_load_acquire(&parent->d_inode->__i_dir_seq) & ~1;
- r_seq = read_seqbegin(&rename_lock);
- dentry = __d_lookup_rcu(parent, name, &d_seq);
- if (unlikely(dentry)) {
- @@ -2462,7 +2479,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
- goto retry;
- }
- hlist_bl_lock(b);
- - if (unlikely(parent->d_inode->i_dir_seq != seq)) {
- + if (unlikely(parent->d_inode->__i_dir_seq != seq)) {
- hlist_bl_unlock(b);
- rcu_read_unlock();
- goto retry;
- @@ -2535,7 +2552,7 @@ void __d_lookup_done(struct dentry *dentry)
- hlist_bl_lock(b);
- dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
- __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
- - wake_up_all(dentry->d_wait);
- + swake_up_all(dentry->d_wait);
- dentry->d_wait = NULL;
- hlist_bl_unlock(b);
- INIT_HLIST_NODE(&dentry->d_u.d_alias);
- @@ -3632,6 +3649,11 @@ EXPORT_SYMBOL(d_genocide);
-
- void __init vfs_caches_init_early(void)
- {
- + int i;
- +
- + for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
- + INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
- +
- dcache_init_early();
- inode_init_early();
- }
- diff --git a/fs/eventpoll.c b/fs/eventpoll.c
- index 3cbc30413add..41a94f552aab 100644
- --- a/fs/eventpoll.c
- +++ b/fs/eventpoll.c
- @@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
- */
- static void ep_poll_safewake(wait_queue_head_t *wq)
- {
- - int this_cpu = get_cpu();
- + int this_cpu = get_cpu_light();
-
- ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
- ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
-
- - put_cpu();
- + put_cpu_light();
- }
-
- static void ep_remove_wait_queue(struct eppoll_entry *pwq)
- diff --git a/fs/exec.c b/fs/exec.c
- index b8c43be24751..71f4c6ec2bb8 100644
- --- a/fs/exec.c
- +++ b/fs/exec.c
- @@ -1038,12 +1038,14 @@ static int exec_mmap(struct mm_struct *mm)
- }
- }
- task_lock(tsk);
- + preempt_disable_rt();
- active_mm = tsk->active_mm;
- tsk->mm = mm;
- tsk->active_mm = mm;
- activate_mm(active_mm, mm);
- tsk->mm->vmacache_seqnum = 0;
- vmacache_flush(tsk);
- + preempt_enable_rt();
- task_unlock(tsk);
- if (old_mm) {
- up_read(&old_mm->mmap_sem);
- diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
- index 0094923e5ebf..37fa06ef5417 100644
- --- a/fs/ext4/page-io.c
- +++ b/fs/ext4/page-io.c
- @@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio)
- * We check all buffers in the page under BH_Uptodate_Lock
- * to avoid races with other end io clearing async_write flags
- */
- - local_irq_save(flags);
- - bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
- + flags = bh_uptodate_lock_irqsave(head);
- do {
- if (bh_offset(bh) < bio_start ||
- bh_offset(bh) + bh->b_size > bio_end) {
- @@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *bio)
- if (bio->bi_error)
- buffer_io_error(bh);
- } while ((bh = bh->b_this_page) != head);
- - bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- - local_irq_restore(flags);
- + bh_uptodate_unlock_irqrestore(head, flags);
- if (!under_io) {
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (data_page)
- diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
- index 4bbad745415a..5f91ca248ab0 100644
- --- a/fs/fuse/dir.c
- +++ b/fs/fuse/dir.c
- @@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct file *file,
- struct inode *dir = d_inode(parent);
- struct fuse_conn *fc;
- struct inode *inode;
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
-
- if (!o->nodeid) {
- /*
- diff --git a/fs/inode.c b/fs/inode.c
- index 920aa0b1c6b0..3d6b5fd1bf06 100644
- --- a/fs/inode.c
- +++ b/fs/inode.c
- @@ -153,7 +153,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
- inode->i_bdev = NULL;
- inode->i_cdev = NULL;
- inode->i_link = NULL;
- - inode->i_dir_seq = 0;
- + inode->__i_dir_seq = 0;
- inode->i_rdev = 0;
- inode->dirtied_when = 0;
-
- diff --git a/fs/libfs.c b/fs/libfs.c
- index 9588780ad43e..9b37abd354c9 100644
- --- a/fs/libfs.c
- +++ b/fs/libfs.c
- @@ -89,7 +89,7 @@ static struct dentry *next_positive(struct dentry *parent,
- struct list_head *from,
- int count)
- {
- - unsigned *seq = &parent->d_inode->i_dir_seq, n;
- + unsigned *seq = &parent->d_inode->__i_dir_seq, n;
- struct dentry *res;
- struct list_head *p;
- bool skipped;
- @@ -122,8 +122,9 @@ static struct dentry *next_positive(struct dentry *parent,
- static void move_cursor(struct dentry *cursor, struct list_head *after)
- {
- struct dentry *parent = cursor->d_parent;
- - unsigned n, *seq = &parent->d_inode->i_dir_seq;
- + unsigned n, *seq = &parent->d_inode->__i_dir_seq;
- spin_lock(&parent->d_lock);
- + preempt_disable_rt();
- for (;;) {
- n = *seq;
- if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
- @@ -136,6 +137,7 @@ static void move_cursor(struct dentry *cursor, struct list_head *after)
- else
- list_add_tail(&cursor->d_child, &parent->d_subdirs);
- smp_store_release(seq, n + 2);
- + preempt_enable_rt();
- spin_unlock(&parent->d_lock);
- }
-
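-
- i_dir_seq is renamed to __i_dir_seq to catch any remaining direct
- users at compile time: it is an open-coded sequence counter, and its
- write side (cmpxchg to an odd value, store back an even one) must not
- be preempted on RT or readers would spin on the odd value against a
- possibly lower-priority writer. That is what the
- preempt_disable_rt()/preempt_enable_rt() brackets in dcache.c and
- libfs.c guarantee. The generic shape, as a sketch:
-
-     /* Open-coded seqcount write side with the RT bracket. */
-     static unsigned dir_seq_begin(unsigned *seq)
-     {
-             unsigned n;
-
-             preempt_disable_rt();   /* RT: writer must not stall readers */
-             for (;;) {
-                     n = *seq;
-                     if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-                             return n;       /* now odd: readers retry */
-                     cpu_relax();
-             }
-     }
-
-     static void dir_seq_end(unsigned *seq, unsigned n)
-     {
-             smp_store_release(seq, n + 2);  /* even again: readers proceed */
-             preempt_enable_rt();
-     }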
- diff --git a/fs/locks.c b/fs/locks.c
- index 22c5b4aa4961..269c6a44449a 100644
- --- a/fs/locks.c
- +++ b/fs/locks.c
- @@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
- return -ENOMEM;
- }
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- if (request->fl_flags & FL_ACCESS)
- goto find_conflict;
- @@ -976,7 +976,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
-
- out:
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
- if (new_fl)
- locks_free_lock(new_fl);
- locks_dispose_list(&dispose);
- @@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
- new_fl2 = locks_alloc_lock();
- }
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- /*
- * New lock request. Walk all POSIX locks and look for conflicts. If
- @@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
- }
- out:
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
- /*
- * Free any unused locks.
- */
- @@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
- return error;
- }
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
-
- time_out_leases(inode, &dispose);
- @@ -1512,13 +1512,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
- locks_insert_block(fl, new_fl);
- trace_break_lease_block(inode, new_fl);
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
-
- locks_dispose_list(&dispose);
- error = wait_event_interruptible_timeout(new_fl->fl_wait,
- !new_fl->fl_next, break_time);
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- trace_break_lease_unblock(inode, new_fl);
- locks_delete_block(new_fl);
- @@ -1535,7 +1535,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
- }
- out:
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- locks_free_lock(new_fl);
- return error;
- @@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
-
- ctx = smp_load_acquire(&inode->i_flctx);
- if (ctx && !list_empty_careful(&ctx->flc_lease)) {
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- @@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
- break;
- }
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
-
- locks_dispose_list(&dispose);
- }
- @@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
- return -EINVAL;
- }
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- error = check_conflicting_open(dentry, arg, lease->fl_flags);
- @@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
- lease->fl_lmops->lm_setup(lease, priv);
- out:
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- if (is_deleg)
- inode_unlock(inode);
- @@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
- return error;
- }
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
- if (fl->fl_file == filp &&
- @@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
- if (victim)
- error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- return error;
- }
- @@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
- if (list_empty(&ctx->flc_lease))
- return;
-
- - percpu_down_read_preempt_disable(&file_rwsem);
- + percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
- if (filp == fl->fl_file)
- lease_modify(fl, F_UNLCK, &dispose);
- spin_unlock(&ctx->flc_lock);
- - percpu_up_read_preempt_enable(&file_rwsem);
- + percpu_up_read(&file_rwsem);
-
- locks_dispose_list(&dispose);
- }
- diff --git a/fs/namei.c b/fs/namei.c
- index e7d125c23aa6..072a2f724437 100644
- --- a/fs/namei.c
- +++ b/fs/namei.c
- @@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
- {
- struct dentry *dentry = ERR_PTR(-ENOENT), *old;
- struct inode *inode = dir->d_inode;
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
-
- inode_lock_shared(inode);
- /* Don't go there if it's already dead */
- @@ -3089,7 +3089,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
- struct dentry *dentry;
- int error, create_error = 0;
- umode_t mode = op->mode;
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
-
- if (unlikely(IS_DEADDIR(dir_inode)))
- return -ENOENT;
- diff --git a/fs/namespace.c b/fs/namespace.c
- index d7360f9897b4..da188c6966a3 100644
- --- a/fs/namespace.c
- +++ b/fs/namespace.c
- @@ -14,6 +14,7 @@
- #include <linux/mnt_namespace.h>
- #include <linux/user_namespace.h>
- #include <linux/namei.h>
- +#include <linux/delay.h>
- #include <linux/security.h>
- #include <linux/idr.h>
- #include <linux/init.h> /* init_rootfs */
- @@ -357,8 +358,11 @@ int __mnt_want_write(struct vfsmount *m)
- * incremented count after it has set MNT_WRITE_HOLD.
- */
- smp_mb();
- - while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
- - cpu_relax();
- + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
- + preempt_enable();
- + cpu_chill();
- + preempt_disable();
- + }
- /*
- * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
- * be set to match its requirements. So we must not load that until
- diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
- index dff600ae0d74..d726d2e09353 100644
- --- a/fs/nfs/delegation.c
- +++ b/fs/nfs/delegation.c
- @@ -150,11 +150,11 @@ static int nfs_delegation_claim_opens(struct inode *inode,
- sp = state->owner;
- /* Block nfs4_proc_unlck */
- mutex_lock(&sp->so_delegreturn_mutex);
- - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
- + seq = read_seqbegin(&sp->so_reclaim_seqlock);
- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
- if (!err)
- err = nfs_delegation_claim_locks(ctx, state, stateid);
- - if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
- + if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq))
- err = -EAGAIN;
- mutex_unlock(&sp->so_delegreturn_mutex);
- put_nfs_open_context(ctx);
- diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
- index 1e5321d1ed22..2510f2be8557 100644
- --- a/fs/nfs/dir.c
- +++ b/fs/nfs/dir.c
- @@ -485,7 +485,7 @@ static
- void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
- {
- struct qstr filename = QSTR_INIT(entry->name, entry->len);
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
- struct dentry *dentry;
- struct dentry *alias;
- struct inode *dir = d_inode(parent);
- @@ -1492,7 +1492,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
- {
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
- struct nfs_open_context *ctx;
- struct dentry *res;
- struct iattr attr = { .ia_valid = ATTR_OPEN };
- @@ -1807,7 +1807,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
-
- trace_nfs_rmdir_enter(dir, dentry);
- if (d_really_is_positive(dentry)) {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + down(&NFS_I(d_inode(dentry))->rmdir_sem);
- +#else
- down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
- +#endif
- error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
- /* Ensure the VFS deletes this inode */
- switch (error) {
- @@ -1817,7 +1821,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
- case -ENOENT:
- nfs_dentry_handle_enoent(dentry);
- }
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + up(&NFS_I(d_inode(dentry))->rmdir_sem);
- +#else
- up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
- +#endif
- } else
- error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
- trace_nfs_rmdir_exit(dir, dentry, error);
- diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
- index 76ae25661d3f..89159d298278 100644
- --- a/fs/nfs/inode.c
- +++ b/fs/nfs/inode.c
- @@ -1957,7 +1957,11 @@ static void init_once(void *foo)
- nfsi->nrequests = 0;
- nfsi->commit_info.ncommit = 0;
- atomic_set(&nfsi->commit_info.rpcs_out, 0);
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + sema_init(&nfsi->rmdir_sem, 1);
- +#else
- init_rwsem(&nfsi->rmdir_sem);
- +#endif
- nfs4_init_once(nfsi);
- }
-
- diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
- index 1452177c822d..f43b01d54c59 100644
- --- a/fs/nfs/nfs4_fs.h
- +++ b/fs/nfs/nfs4_fs.h
- @@ -111,7 +111,7 @@ struct nfs4_state_owner {
- unsigned long so_flags;
- struct list_head so_states;
- struct nfs_seqid_counter so_seqid;
- - seqcount_t so_reclaim_seqcount;
- + seqlock_t so_reclaim_seqlock;
- struct mutex so_delegreturn_mutex;
- };
-
- diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
- index 4638654e26f3..5dd6fd555c72 100644
- --- a/fs/nfs/nfs4proc.c
- +++ b/fs/nfs/nfs4proc.c
- @@ -2691,7 +2691,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
- unsigned int seq;
- int ret;
-
- - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
- + seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
-
- ret = _nfs4_proc_open(opendata);
- if (ret != 0)
- @@ -2729,7 +2729,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
-
- if (d_inode(dentry) == state->inode) {
- nfs_inode_attach_open_context(ctx);
- - if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
- + if (read_seqretry(&sp->so_reclaim_seqlock, seq))
- nfs4_schedule_stateid_recovery(server, state);
- }
- out:
- diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
- index 71deeae6eefd..4be6999299dc 100644
- --- a/fs/nfs/nfs4state.c
- +++ b/fs/nfs/nfs4state.c
- @@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
- nfs4_init_seqid_counter(&sp->so_seqid);
- atomic_set(&sp->so_count, 1);
- INIT_LIST_HEAD(&sp->so_lru);
- - seqcount_init(&sp->so_reclaim_seqcount);
- + seqlock_init(&sp->so_reclaim_seqlock);
- mutex_init(&sp->so_delegreturn_mutex);
- return sp;
- }
- @@ -1498,8 +1498,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
- * recovering after a network partition or a reboot from a
- * server that doesn't support a grace period.
- */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + write_seqlock(&sp->so_reclaim_seqlock);
- +#else
- + write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount);
- +#endif
- spin_lock(&sp->so_lock);
- - raw_write_seqcount_begin(&sp->so_reclaim_seqcount);
- restart:
- list_for_each_entry(state, &sp->so_states, open_states) {
- if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
- @@ -1568,14 +1572,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
- spin_lock(&sp->so_lock);
- goto restart;
- }
- - raw_write_seqcount_end(&sp->so_reclaim_seqcount);
- spin_unlock(&sp->so_lock);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + write_sequnlock(&sp->so_reclaim_seqlock);
- +#else
- + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
- +#endif
- return 0;
- out_err:
- nfs4_put_open_state(state);
- - spin_lock(&sp->so_lock);
- - raw_write_seqcount_end(&sp->so_reclaim_seqcount);
- - spin_unlock(&sp->so_lock);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + write_sequnlock(&sp->so_reclaim_seqlock);
- +#else
- + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount);
- +#endif
- return status;
- }
-
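-
- so_reclaim_seqcount becomes a full seqlock_t because this write
- section is long and preemptible. On RT the writer takes the seqlock's
- internal spinlock via write_seqlock(), which serializes writers and
- allows priority inheritance; non-RT keeps the cheaper seqcount-only
- write through the embedded .seqcount, relying on the existing caller
- serialization. Readers use read_seqbegin()/read_seqretry() either way.
- Sketch:
-
-     #include <linux/seqlock.h>
-
-     static DEFINE_SEQLOCK(reclaim_sl);
-
-     static void writer_section(void)
-     {
-     #ifdef CONFIG_PREEMPT_RT_FULL
-             write_seqlock(&reclaim_sl);     /* takes the embedded lock */
-     #else
-             write_seqcount_begin(&reclaim_sl.seqcount);
-     #endif
-             /* ... long, preemptible update ... */
-     #ifdef CONFIG_PREEMPT_RT_FULL
-             write_sequnlock(&reclaim_sl);
-     #else
-             write_seqcount_end(&reclaim_sl.seqcount);
-     #endif
-     }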
- diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
- index 191aa577dd1f..58990c8f52e0 100644
- --- a/fs/nfs/unlink.c
- +++ b/fs/nfs/unlink.c
- @@ -12,7 +12,7 @@
- #include <linux/sunrpc/clnt.h>
- #include <linux/nfs_fs.h>
- #include <linux/sched.h>
- -#include <linux/wait.h>
- +#include <linux/swait.h>
- #include <linux/namei.h>
- #include <linux/fsnotify.h>
-
- @@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
- rpc_restart_call_prepare(task);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +static void nfs_down_anon(struct semaphore *sema)
- +{
- + down(sema);
- +}
- +
- +static void nfs_up_anon(struct semaphore *sema)
- +{
- + up(sema);
- +}
- +
- +#else
- +static void nfs_down_anon(struct rw_semaphore *rwsem)
- +{
- + down_read_non_owner(rwsem);
- +}
- +
- +static void nfs_up_anon(struct rw_semaphore *rwsem)
- +{
- + up_read_non_owner(rwsem);
- +}
- +#endif
- +
- /**
- * nfs_async_unlink_release - Release the sillydelete data.
- * @task: rpc_task of the sillydelete
- @@ -64,7 +87,7 @@ static void nfs_async_unlink_release(void *calldata)
- struct dentry *dentry = data->dentry;
- struct super_block *sb = dentry->d_sb;
-
- - up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
- + nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem);
- d_lookup_done(dentry);
- nfs_free_unlinkdata(data);
- dput(dentry);
- @@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
- struct inode *dir = d_inode(dentry->d_parent);
- struct dentry *alias;
-
- - down_read_non_owner(&NFS_I(dir)->rmdir_sem);
- + nfs_down_anon(&NFS_I(dir)->rmdir_sem);
- alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);
- if (IS_ERR(alias)) {
- - up_read_non_owner(&NFS_I(dir)->rmdir_sem);
- + nfs_up_anon(&NFS_I(dir)->rmdir_sem);
- return 0;
- }
- if (!d_in_lookup(alias)) {
- @@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
- ret = 0;
- spin_unlock(&alias->d_lock);
- dput(alias);
- - up_read_non_owner(&NFS_I(dir)->rmdir_sem);
- + nfs_up_anon(&NFS_I(dir)->rmdir_sem);
- /*
- * If we'd displaced old cached devname, free it. At that
- * point dentry is definitely not a root, so we won't need
- @@ -182,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
- goto out_free_name;
- }
- data->res.dir_attr = &data->dir_attr;
- - init_waitqueue_head(&data->wq);
- + init_swait_queue_head(&data->wq);
-
- status = -EBUSY;
- spin_lock(&dentry->d_lock);
- diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
- index fe251f187ff8..e89da4fb14c2 100644
- --- a/fs/ntfs/aops.c
- +++ b/fs/ntfs/aops.c
- @@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
- ofs = 0;
- if (file_ofs < init_size)
- ofs = init_size - file_ofs;
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- kaddr = kmap_atomic(page);
- memset(kaddr + bh_offset(bh) + ofs, 0,
- bh->b_size - ofs);
- flush_dcache_page(page);
- kunmap_atomic(kaddr);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
- } else {
- clear_buffer_uptodate(bh);
- @@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
- "0x%llx.", (unsigned long long)bh->b_blocknr);
- }
- first = page_buffers(page);
- - local_irq_save(flags);
- - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
- + flags = bh_uptodate_lock_irqsave(first);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
- @@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
- - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- - local_irq_restore(flags);
- + bh_uptodate_unlock_irqrestore(first, flags);
- /*
- * If none of the buffers had errors then we can set the page uptodate,
- * but we first have to perform the post read mst fixups, if the
- @@ -145,13 +143,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
- recs = PAGE_SIZE / rec_size;
- /* Should have been verified before we got here... */
- BUG_ON(!recs);
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- kaddr = kmap_atomic(page);
- for (i = 0; i < recs; i++)
- post_read_mst_fixup((NTFS_RECORD*)(kaddr +
- i * rec_size), rec_size);
- kunmap_atomic(kaddr);
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- flush_dcache_page(page);
- if (likely(page_uptodate && !PageError(page)))
- SetPageUptodate(page);
- @@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
- unlock_page(page);
- return;
- still_busy:
- - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- - local_irq_restore(flags);
- - return;
- + bh_uptodate_unlock_irqrestore(first, flags);
- }
-
- /**
- diff --git a/fs/proc/base.c b/fs/proc/base.c
- index e67fec3c9856..0edc16f95596 100644
- --- a/fs/proc/base.c
- +++ b/fs/proc/base.c
- @@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
-
- child = d_hash_and_lookup(dir, &qname);
- if (!child) {
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
- child = d_alloc_parallel(dir, &qname, &wq);
- if (IS_ERR(child))
- goto end_instantiate;
- diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
- index d4e37acd4821..000cea46434a 100644
- --- a/fs/proc/proc_sysctl.c
- +++ b/fs/proc/proc_sysctl.c
- @@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct file *file,
-
- child = d_lookup(dir, &qname);
- if (!child) {
- - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
- child = d_alloc_parallel(dir, &qname, &wq);
- if (IS_ERR(child))
- return false;
- diff --git a/fs/timerfd.c b/fs/timerfd.c
- index ab8dd1538381..5580853f57dd 100644
- --- a/fs/timerfd.c
- +++ b/fs/timerfd.c
- @@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, int flags,
- break;
- }
- spin_unlock_irq(&ctx->wqh.lock);
- - cpu_relax();
- + if (isalarm(ctx))
- + hrtimer_wait_for_timer(&ctx->t.alarm.timer);
- + else
- + hrtimer_wait_for_timer(&ctx->t.tmr);
- }
-
- /*
- diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
- index d31cd1ebd8e9..5ea3f933a52a 100644
- --- a/fs/xfs/xfs_aops.c
- +++ b/fs/xfs/xfs_aops.c
- @@ -112,8 +112,7 @@ xfs_finish_page_writeback(
- ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
- ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
-
- - local_irq_save(flags);
- - bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
- + flags = bh_uptodate_lock_irqsave(head);
- do {
- if (off >= bvec->bv_offset &&
- off < bvec->bv_offset + bvec->bv_len) {
- @@ -136,8 +135,7 @@ xfs_finish_page_writeback(
- }
- off += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
- - bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
- - local_irq_restore(flags);
- + bh_uptodate_unlock_irqrestore(head, flags);
-
- if (!busy)
- end_page_writeback(bvec->bv_page);
- diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
- index e861a24f06f2..b5c97d3059c7 100644
- --- a/include/acpi/platform/aclinux.h
- +++ b/include/acpi/platform/aclinux.h
- @@ -133,6 +133,7 @@
-
- #define acpi_cache_t struct kmem_cache
- #define acpi_spinlock spinlock_t *
- +#define acpi_raw_spinlock raw_spinlock_t *
- #define acpi_cpu_flags unsigned long
-
- /* Use native linux version of acpi_os_allocate_zeroed */
- @@ -151,6 +152,20 @@
- #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
- #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
-
- +#define acpi_os_create_raw_lock(__handle) \
- +({ \
- + raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
- + \
- + if (lock) { \
- + *(__handle) = lock; \
- + raw_spin_lock_init(*(__handle)); \
- + } \
- + lock ? AE_OK : AE_NO_MEMORY; \
- + })
- +
- +#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
- +
- +
- /*
- * OSL interfaces used by debugger/disassembler
- */
- diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
- index 6f96247226a4..fa53a21263c2 100644
- --- a/include/asm-generic/bug.h
- +++ b/include/asm-generic/bug.h
- @@ -215,6 +215,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
- # define WARN_ON_SMP(x) ({0;})
- #endif
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +# define BUG_ON_RT(c) BUG_ON(c)
- +# define BUG_ON_NONRT(c) do { } while (0)
- +# define WARN_ON_RT(condition) WARN_ON(condition)
- +# define WARN_ON_NONRT(condition) do { } while (0)
- +# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
- +#else
- +# define BUG_ON_RT(c) do { } while (0)
- +# define BUG_ON_NONRT(c) BUG_ON(c)
- +# define WARN_ON_RT(condition) do { } while (0)
- +# define WARN_ON_NONRT(condition) WARN_ON(condition)
- +# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
- +#endif
- +
- #endif /* __ASSEMBLY__ */
-
- #endif
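-
- These variants let an assertion apply to only one configuration. A
- hypothetical example: a function that runs with interrupts disabled on
- !RT but in preemptible task context on RT keeps its sanity check without
- producing false positives on RT:
-
- 	static void example_update(void)
- 	{
- 		/* Only meaningful when not running PREEMPT_RT */
- 		WARN_ON_NONRT(!irqs_disabled());
- 		/* ... update per-CPU state ... */
- 	}
-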
- diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
- index 535ab2e13d2e..cfc246899473 100644
- --- a/include/linux/blk-mq.h
- +++ b/include/linux/blk-mq.h
- @@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
- return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
- }
-
- -
- +void __blk_mq_complete_request_remote_work(struct work_struct *work);
- int blk_mq_request_started(struct request *rq);
- void blk_mq_start_request(struct request *rq);
- void blk_mq_end_request(struct request *rq, int error);
- diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
- index f6a816129856..ec7a4676f8a8 100644
- --- a/include/linux/blkdev.h
- +++ b/include/linux/blkdev.h
- @@ -89,6 +89,7 @@ struct request {
- struct list_head queuelist;
- union {
- struct call_single_data csd;
- + struct work_struct work;
- u64 fifo_time;
- };
-
- @@ -467,7 +468,7 @@ struct request_queue {
- struct throtl_data *td;
- #endif
- struct rcu_head rcu_head;
- - wait_queue_head_t mq_freeze_wq;
- + struct swait_queue_head mq_freeze_wq;
- struct percpu_ref q_usage_counter;
- struct list_head all_q_node;
-
- diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
- index 8fdcb783197d..d07dbeec7bc1 100644
- --- a/include/linux/bottom_half.h
- +++ b/include/linux/bottom_half.h
- @@ -3,6 +3,39 @@
-
- #include <linux/preempt.h>
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +
- +extern void __local_bh_disable(void);
- +extern void _local_bh_enable(void);
- +extern void __local_bh_enable(void);
- +
- +static inline void local_bh_disable(void)
- +{
- + __local_bh_disable();
- +}
- +
- +static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
- +{
- + __local_bh_disable();
- +}
- +
- +static inline void local_bh_enable(void)
- +{
- + __local_bh_enable();
- +}
- +
- +static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
- +{
- + __local_bh_enable();
- +}
- +
- +static inline void local_bh_enable_ip(unsigned long ip)
- +{
- + __local_bh_enable();
- +}
- +
- +#else
- +
- #ifdef CONFIG_TRACE_IRQFLAGS
- extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
- #else
- @@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
- {
- __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
- }
- +#endif
-
- #endif /* _LINUX_BH_H */
- diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
- index 4431ea2c8802..0744157a97ca 100644
- --- a/include/linux/buffer_head.h
- +++ b/include/linux/buffer_head.h
- @@ -75,8 +75,50 @@ struct buffer_head {
- struct address_space *b_assoc_map; /* mapping this buffer is
- associated with */
- atomic_t b_count; /* users using this buffer_head */
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + spinlock_t b_uptodate_lock;
- +#if IS_ENABLED(CONFIG_JBD2)
- + spinlock_t b_state_lock;
- + spinlock_t b_journal_head_lock;
- +#endif
- +#endif
- };
-
- +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
- +{
- + unsigned long flags;
- +
- +#ifndef CONFIG_PREEMPT_RT_BASE
- + local_irq_save(flags);
- + bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
- +#else
- + spin_lock_irqsave(&bh->b_uptodate_lock, flags);
- +#endif
- + return flags;
- +}
- +
- +static inline void
- +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
- +{
- +#ifndef CONFIG_PREEMPT_RT_BASE
- + bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
- + local_irq_restore(flags);
- +#else
- + spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
- +#endif
- +}
- +
- +static inline void buffer_head_init_locks(struct buffer_head *bh)
- +{
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + spin_lock_init(&bh->b_uptodate_lock);
- +#if IS_ENABLED(CONFIG_JBD2)
- + spin_lock_init(&bh->b_state_lock);
- + spin_lock_init(&bh->b_journal_head_lock);
- +#endif
- +#endif
- +}
- +
- /*
- * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
- * and buffer_foo() functions.
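-
- This folds the open-coded local_irq_save() + bit_spin_lock(BH_Uptodate_Lock)
- sequences (removed from fs/ntfs and fs/xfs above) into one helper pair that
- maps to a sleeping spinlock on RT. A minimal end_io-style user, sketched
- after the same pattern (my_end_io is hypothetical):
-
- 	static void my_end_io(struct buffer_head *bh, int uptodate)
- 	{
- 		struct buffer_head *first = page_buffers(bh->b_page);
- 		unsigned long flags;
-
- 		flags = bh_uptodate_lock_irqsave(first);
- 		/* walk the buffer ring of the page under the lock */
- 		bh_uptodate_unlock_irqrestore(first, flags);
- 	}
-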
- diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
- index 6fb1c34cf805..ccd2a5addb56 100644
- --- a/include/linux/cgroup-defs.h
- +++ b/include/linux/cgroup-defs.h
- @@ -16,6 +16,7 @@
- #include <linux/percpu-refcount.h>
- #include <linux/percpu-rwsem.h>
- #include <linux/workqueue.h>
- +#include <linux/swork.h>
-
- #ifdef CONFIG_CGROUPS
-
- @@ -138,6 +139,7 @@ struct cgroup_subsys_state {
- /* percpu_ref killing and RCU release */
- struct rcu_head rcu_head;
- struct work_struct destroy_work;
- + struct swork_event destroy_swork;
- };
-
- /*
- diff --git a/include/linux/completion.h b/include/linux/completion.h
- index 5d5aaae3af43..3bca1590e29f 100644
- --- a/include/linux/completion.h
- +++ b/include/linux/completion.h
- @@ -7,8 +7,7 @@
- * Atomic wait-for-completion handler data structures.
- * See kernel/sched/completion.c for details.
- */
- -
- -#include <linux/wait.h>
- +#include <linux/swait.h>
-
- /*
- * struct completion - structure used to maintain state for a "completion"
- @@ -24,11 +23,11 @@
- */
- struct completion {
- unsigned int done;
- - wait_queue_head_t wait;
- + struct swait_queue_head wait;
- };
-
- #define COMPLETION_INITIALIZER(work) \
- - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
- + { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-
- #define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
- @@ -73,7 +72,7 @@ struct completion {
- static inline void init_completion(struct completion *x)
- {
- x->done = 0;
- - init_waitqueue_head(&x->wait);
- + init_swait_queue_head(&x->wait);
- }
-
- /**
- diff --git a/include/linux/cpu.h b/include/linux/cpu.h
- index e571128ad99a..5e52d28c20c1 100644
- --- a/include/linux/cpu.h
- +++ b/include/linux/cpu.h
- @@ -182,6 +182,8 @@ extern void get_online_cpus(void);
- extern void put_online_cpus(void);
- extern void cpu_hotplug_disable(void);
- extern void cpu_hotplug_enable(void);
- +extern void pin_current_cpu(void);
- +extern void unpin_current_cpu(void);
- #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
- #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
- #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
- @@ -199,6 +201,8 @@ static inline void cpu_hotplug_done(void) {}
- #define put_online_cpus() do { } while (0)
- #define cpu_hotplug_disable() do { } while (0)
- #define cpu_hotplug_enable() do { } while (0)
- +static inline void pin_current_cpu(void) { }
- +static inline void unpin_current_cpu(void) { }
- #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
- #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
- /* These aren't inline functions due to a GCC bug. */
- diff --git a/include/linux/dcache.h b/include/linux/dcache.h
- index ff295e166b2c..d532c60f3fb5 100644
- --- a/include/linux/dcache.h
- +++ b/include/linux/dcache.h
- @@ -11,6 +11,7 @@
- #include <linux/rcupdate.h>
- #include <linux/lockref.h>
- #include <linux/stringhash.h>
- +#include <linux/wait.h>
-
- struct path;
- struct vfsmount;
- @@ -100,7 +101,7 @@ struct dentry {
-
- union {
- struct list_head d_lru; /* LRU list */
- - wait_queue_head_t *d_wait; /* in-lookup ones only */
- + struct swait_queue_head *d_wait; /* in-lookup ones only */
- };
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
- @@ -230,7 +231,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
- extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
- extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
- extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
- - wait_queue_head_t *);
- + struct swait_queue_head *);
- extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
- extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
- extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
- diff --git a/include/linux/delay.h b/include/linux/delay.h
- index a6ecb34cf547..37caab306336 100644
- --- a/include/linux/delay.h
- +++ b/include/linux/delay.h
- @@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds)
- msleep(seconds * 1000);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +extern void cpu_chill(void);
- +#else
- +# define cpu_chill() cpu_relax()
- +#endif
- +
- #endif /* defined(_LINUX_DELAY_H) */
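-
- cpu_chill() is meant for retry loops that wait on progress made by another
- task; on RT, busy-waiting could starve a lower-priority holder, so the RT
- implementation sleeps briefly instead of spinning. Intended call pattern
- (try_grab_object() is a hypothetical helper):
-
- 	while (!try_grab_object(obj)) {
- 		/* !RT: cpu_relax(); RT: short sleep */
- 		cpu_chill();
- 	}
-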
- diff --git a/include/linux/fs.h b/include/linux/fs.h
- index d705ae084edd..ab1946f4a729 100644
- --- a/include/linux/fs.h
- +++ b/include/linux/fs.h
- @@ -688,7 +688,7 @@ struct inode {
- struct block_device *i_bdev;
- struct cdev *i_cdev;
- char *i_link;
- - unsigned i_dir_seq;
- + unsigned __i_dir_seq;
- };
-
- __u32 i_generation;
- diff --git a/include/linux/highmem.h b/include/linux/highmem.h
- index bb3f3297062a..a117a33ef72c 100644
- --- a/include/linux/highmem.h
- +++ b/include/linux/highmem.h
- @@ -7,6 +7,7 @@
- #include <linux/mm.h>
- #include <linux/uaccess.h>
- #include <linux/hardirq.h>
- +#include <linux/sched.h>
-
- #include <asm/cacheflush.h>
-
- @@ -65,7 +66,7 @@ static inline void kunmap(struct page *page)
-
- static inline void *kmap_atomic(struct page *page)
- {
- - preempt_disable();
- + preempt_disable_nort();
- pagefault_disable();
- return page_address(page);
- }
- @@ -74,7 +75,7 @@ static inline void *kmap_atomic(struct page *page)
- static inline void __kunmap_atomic(void *addr)
- {
- pagefault_enable();
- - preempt_enable();
- + preempt_enable_nort();
- }
-
- #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
- @@ -86,32 +87,51 @@ static inline void __kunmap_atomic(void *addr)
-
- #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- DECLARE_PER_CPU(int, __kmap_atomic_idx);
- +#endif
-
- static inline int kmap_atomic_idx_push(void)
- {
- +#ifndef CONFIG_PREEMPT_RT_FULL
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
- -#ifdef CONFIG_DEBUG_HIGHMEM
- +# ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
- -#endif
- +# endif
- return idx;
- +#else
- + current->kmap_idx++;
- + BUG_ON(current->kmap_idx > KM_TYPE_NR);
- + return current->kmap_idx - 1;
- +#endif
- }
-
- static inline int kmap_atomic_idx(void)
- {
- +#ifndef CONFIG_PREEMPT_RT_FULL
- return __this_cpu_read(__kmap_atomic_idx) - 1;
- +#else
- + return current->kmap_idx - 1;
- +#endif
- }
-
- static inline void kmap_atomic_idx_pop(void)
- {
- -#ifdef CONFIG_DEBUG_HIGHMEM
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +# ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
- BUG_ON(idx < 0);
- -#else
- +# else
- __this_cpu_dec(__kmap_atomic_idx);
- +# endif
- +#else
- + current->kmap_idx--;
- +# ifdef CONFIG_DEBUG_HIGHMEM
- + BUG_ON(current->kmap_idx < 0);
- +# endif
- #endif
- }
-
- diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
- index 5e00f80b1535..a34e10b55cde 100644
- --- a/include/linux/hrtimer.h
- +++ b/include/linux/hrtimer.h
- @@ -87,6 +87,9 @@ enum hrtimer_restart {
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- + * @cb_entry: list entry to defer timers from hardirq context
- + * @irqsafe: timer can run in hardirq context
- + * @praecox: timer expiry time if expired at the time of programming
- * @is_rel: Set if the timer was armed relative
- * @start_pid: timer statistics field to store the pid of the task which
- * started the timer
- @@ -103,6 +106,11 @@ struct hrtimer {
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- + struct list_head cb_entry;
- + int irqsafe;
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + ktime_t praecox;
- +#endif
- u8 is_rel;
- #ifdef CONFIG_TIMER_STATS
- int start_pid;
- @@ -123,11 +131,7 @@ struct hrtimer_sleeper {
- struct task_struct *task;
- };
-
- -#ifdef CONFIG_64BIT
- # define HRTIMER_CLOCK_BASE_ALIGN 64
- -#else
- -# define HRTIMER_CLOCK_BASE_ALIGN 32
- -#endif
-
- /**
- * struct hrtimer_clock_base - the timer base for a specific clock
- @@ -136,6 +140,7 @@ struct hrtimer_sleeper {
- * timer to a base on another cpu.
- * @clockid: clock id for per_cpu support
- * @active: red black tree root node for the active timers
- + * @expired: list head for deferred timers.
- * @get_time: function to retrieve the current time of the clock
- * @offset: offset of this clock to the monotonic base
- */
- @@ -144,6 +149,7 @@ struct hrtimer_clock_base {
- int index;
- clockid_t clockid;
- struct timerqueue_head active;
- + struct list_head expired;
- ktime_t (*get_time)(void);
- ktime_t offset;
- } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
- @@ -187,6 +193,7 @@ struct hrtimer_cpu_base {
- raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
- + struct hrtimer *running_soft;
- unsigned int cpu;
- unsigned int active_bases;
- unsigned int clock_was_set_seq;
- @@ -202,6 +209,9 @@ struct hrtimer_cpu_base {
- unsigned int nr_retries;
- unsigned int nr_hangs;
- unsigned int max_hang_time;
- +#endif
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + wait_queue_head_t wait;
- #endif
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- } ____cacheline_aligned;
- @@ -412,6 +422,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
- }
-
- +/* Softirq preemption could deadlock timer removal */
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
- +#else
- +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
- +#endif
- +
- /* Query timers: */
- extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-
- @@ -436,9 +453,15 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
- * Helper function to check, whether the timer is running the callback
- * function
- */
- -static inline int hrtimer_callback_running(struct hrtimer *timer)
- +static inline int hrtimer_callback_running(const struct hrtimer *timer)
- {
- - return timer->base->cpu_base->running == timer;
- + if (timer->base->cpu_base->running == timer)
- + return 1;
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + if (timer->base->cpu_base->running_soft == timer)
- + return 1;
- +#endif
- + return 0;
- }
-
- /* Forward a hrtimer so it expires after now: */
- diff --git a/include/linux/idr.h b/include/linux/idr.h
- index 083d61e92706..5899796f50cb 100644
- --- a/include/linux/idr.h
- +++ b/include/linux/idr.h
- @@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
- * Each idr_preload() should be matched with an invocation of this
- * function. See idr_preload() for details.
- */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +void idr_preload_end(void);
- +#else
- static inline void idr_preload_end(void)
- {
- preempt_enable();
- }
- +#endif
-
- /**
- * idr_find - return pointer for given id
- diff --git a/include/linux/init_task.h b/include/linux/init_task.h
- index 325f649d77ff..a56e263f5005 100644
- --- a/include/linux/init_task.h
- +++ b/include/linux/init_task.h
- @@ -150,6 +150,12 @@ extern struct task_group root_task_group;
- # define INIT_PERF_EVENTS(tsk)
- #endif
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +# define INIT_TIMER_LIST .posix_timer_list = NULL,
- +#else
- +# define INIT_TIMER_LIST
- +#endif
- +
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- # define INIT_VTIME(tsk) \
- .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
- @@ -164,6 +170,7 @@ extern struct task_group root_task_group;
- #ifdef CONFIG_RT_MUTEXES
- # define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT, \
- + .pi_top_task = NULL, \
- .pi_waiters_leftmost = NULL,
- #else
- # define INIT_RT_MUTEXES(tsk)
- @@ -250,6 +257,7 @@ extern struct task_group root_task_group;
- .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- + INIT_TIMER_LIST \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
- index 72f0721f75e7..480972ae47d3 100644
- --- a/include/linux/interrupt.h
- +++ b/include/linux/interrupt.h
- @@ -14,6 +14,7 @@
- #include <linux/hrtimer.h>
- #include <linux/kref.h>
- #include <linux/workqueue.h>
- +#include <linux/swork.h>
-
- #include <linux/atomic.h>
- #include <asm/ptrace.h>
- @@ -61,6 +62,7 @@
- * interrupt handler after suspending interrupts. For system
- * wakeup devices users need to implement wakeup detection in
- * their interrupt handlers.
- + * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
- */
- #define IRQF_SHARED 0x00000080
- #define IRQF_PROBE_SHARED 0x00000100
- @@ -74,6 +76,7 @@
- #define IRQF_NO_THREAD 0x00010000
- #define IRQF_EARLY_RESUME 0x00020000
- #define IRQF_COND_SUSPEND 0x00040000
- +#define IRQF_NO_SOFTIRQ_CALL 0x00080000
-
- #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
-
- @@ -196,7 +199,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
- #ifdef CONFIG_LOCKDEP
- # define local_irq_enable_in_hardirq() do { } while (0)
- #else
- -# define local_irq_enable_in_hardirq() local_irq_enable()
- +# define local_irq_enable_in_hardirq() local_irq_enable_nort()
- #endif
-
- extern void disable_irq_nosync(unsigned int irq);
- @@ -216,6 +219,7 @@ extern void resume_device_irqs(void);
- * struct irq_affinity_notify - context for notification of IRQ affinity changes
- * @irq: Interrupt to which notification applies
- * @kref: Reference count, for internal use
- + * @swork: Swork item, for internal use
- * @work: Work item, for internal use
- * @notify: Function to be called on change. This will be
- * called in process context.
- @@ -227,7 +231,11 @@ extern void resume_device_irqs(void);
- struct irq_affinity_notify {
- unsigned int irq;
- struct kref kref;
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + struct swork_event swork;
- +#else
- struct work_struct work;
- +#endif
- void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
- void (*release)(struct kref *ref);
- };
- @@ -406,9 +414,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
- bool state);
-
- #ifdef CONFIG_IRQ_FORCED_THREADING
- +# ifndef CONFIG_PREEMPT_RT_BASE
- extern bool force_irqthreads;
- +# else
- +# define force_irqthreads (true)
- +# endif
- #else
- -#define force_irqthreads (0)
- +#define force_irqthreads (false)
- #endif
-
- #ifndef __ARCH_SET_SOFTIRQ_PENDING
- @@ -465,9 +477,10 @@ struct softirq_action
- void (*action)(struct softirq_action *);
- };
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- asmlinkage void do_softirq(void);
- asmlinkage void __do_softirq(void);
- -
- +static inline void thread_do_softirq(void) { do_softirq(); }
- #ifdef __ARCH_HAS_DO_SOFTIRQ
- void do_softirq_own_stack(void);
- #else
- @@ -476,13 +489,25 @@ static inline void do_softirq_own_stack(void)
- __do_softirq();
- }
- #endif
- +#else
- +extern void thread_do_softirq(void);
- +#endif
-
- extern void open_softirq(int nr, void (*action)(struct softirq_action *));
- extern void softirq_init(void);
- extern void __raise_softirq_irqoff(unsigned int nr);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
- +#else
- +static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
- +{
- + __raise_softirq_irqoff(nr);
- +}
- +#endif
-
- extern void raise_softirq_irqoff(unsigned int nr);
- extern void raise_softirq(unsigned int nr);
- +extern void softirq_check_pending_idle(void);
-
- DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-
- @@ -504,8 +529,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
- to be executed on some cpu at least once after this.
- * If the tasklet is already scheduled, but its execution is still not
- started, it will be executed only once.
- - * If this tasklet is already running on another CPU (or schedule is called
- - from tasklet itself), it is rescheduled for later.
- + * If this tasklet is already running on another CPU, it is rescheduled
- + for later.
- + * Schedule must not be called from the tasklet itself (it would cause a lockup).
- * Tasklet is strictly serialized wrt itself, but not
- wrt another tasklets. If client needs some intertask synchronization,
- he makes it with spinlocks.
- @@ -530,27 +556,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
- enum
- {
- TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
- + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
- + TASKLET_STATE_PENDING /* Tasklet is pending */
- };
-
- -#ifdef CONFIG_SMP
- +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
- +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
- +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
- +
- +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- static inline int tasklet_trylock(struct tasklet_struct *t)
- {
- return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
- }
-
- +static inline int tasklet_tryunlock(struct tasklet_struct *t)
- +{
- + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
- +}
- +
- static inline void tasklet_unlock(struct tasklet_struct *t)
- {
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
- }
-
- -static inline void tasklet_unlock_wait(struct tasklet_struct *t)
- -{
- - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
- -}
- +extern void tasklet_unlock_wait(struct tasklet_struct *t);
- +
- #else
- #define tasklet_trylock(t) 1
- +#define tasklet_tryunlock(t) 1
- #define tasklet_unlock_wait(t) do { } while (0)
- #define tasklet_unlock(t) do { } while (0)
- #endif
- @@ -599,12 +634,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
- smp_mb();
- }
-
- -static inline void tasklet_enable(struct tasklet_struct *t)
- -{
- - smp_mb__before_atomic();
- - atomic_dec(&t->count);
- -}
- -
- +extern void tasklet_enable(struct tasklet_struct *t);
- extern void tasklet_kill(struct tasklet_struct *t);
- extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
- extern void tasklet_init(struct tasklet_struct *t,
- @@ -635,6 +665,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
- tasklet_kill(&ttimer->tasklet);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +extern void softirq_early_init(void);
- +#else
- +static inline void softirq_early_init(void) { }
- +#endif
- +
- /*
- * Autoprobing for irqs:
- *
- diff --git a/include/linux/irq.h b/include/linux/irq.h
- index 39e3254e5769..8ebac94fbb9f 100644
- --- a/include/linux/irq.h
- +++ b/include/linux/irq.h
- @@ -72,6 +72,7 @@ enum irqchip_irq_state;
- * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
- * it from the spurious interrupt detection
- * mechanism and from core side polling.
- + * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
- * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
- */
- enum {
- @@ -99,13 +100,14 @@ enum {
- IRQ_PER_CPU_DEVID = (1 << 17),
- IRQ_IS_POLLED = (1 << 18),
- IRQ_DISABLE_UNLAZY = (1 << 19),
- + IRQ_NO_SOFTIRQ_CALL = (1 << 20),
- };
-
- #define IRQF_MODIFY_MASK \
- (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- - IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
- + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
-
- #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-
- diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
- index 47b9ebd4a74f..2543aab05daa 100644
- --- a/include/linux/irq_work.h
- +++ b/include/linux/irq_work.h
- @@ -16,6 +16,7 @@
- #define IRQ_WORK_BUSY 2UL
- #define IRQ_WORK_FLAGS 3UL
- #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
- +#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
-
- struct irq_work {
- unsigned long flags;
- @@ -51,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
- static inline void irq_work_run(void) { }
- #endif
-
- +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
- +void irq_work_tick_soft(void);
- +#else
- +static inline void irq_work_tick_soft(void) { }
- +#endif
- +
- #endif /* _LINUX_IRQ_WORK_H */
- diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
- index c9be57931b58..eeeb540971ae 100644
- --- a/include/linux/irqdesc.h
- +++ b/include/linux/irqdesc.h
- @@ -66,6 +66,7 @@ struct irq_desc {
- unsigned int irqs_unhandled;
- atomic_t threads_handled;
- int threads_handled_last;
- + u64 random_ip;
- raw_spinlock_t lock;
- struct cpumask *percpu_enabled;
- const struct cpumask *percpu_affinity;
- diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
- index 5dd1272d1ab2..9b77034f7c5e 100644
- --- a/include/linux/irqflags.h
- +++ b/include/linux/irqflags.h
- @@ -25,8 +25,6 @@
- # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
- # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
- # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
- -# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
- -# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
- # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
- #else
- # define trace_hardirqs_on() do { } while (0)
- @@ -39,9 +37,15 @@
- # define trace_softirqs_enabled(p) 0
- # define trace_hardirq_enter() do { } while (0)
- # define trace_hardirq_exit() do { } while (0)
- +# define INIT_TRACE_IRQFLAGS
- +#endif
- +
- +#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
- +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
- +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
- +#else
- # define lockdep_softirq_enter() do { } while (0)
- # define lockdep_softirq_exit() do { } while (0)
- -# define INIT_TRACE_IRQFLAGS
- #endif
-
- #if defined(CONFIG_IRQSOFF_TRACER) || \
- @@ -148,4 +152,23 @@
-
- #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
-
- +/*
- + * local_irq* variants depending on RT/!RT
- + */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define local_irq_disable_nort() do { } while (0)
- +# define local_irq_enable_nort() do { } while (0)
- +# define local_irq_save_nort(flags) local_save_flags(flags)
- +# define local_irq_restore_nort(flags) (void)(flags)
- +# define local_irq_disable_rt() local_irq_disable()
- +# define local_irq_enable_rt() local_irq_enable()
- +#else
- +# define local_irq_disable_nort() local_irq_disable()
- +# define local_irq_enable_nort() local_irq_enable()
- +# define local_irq_save_nort(flags) local_irq_save(flags)
- +# define local_irq_restore_nort(flags) local_irq_restore(flags)
- +# define local_irq_disable_rt() do { } while (0)
- +# define local_irq_enable_rt() do { } while (0)
- +#endif
- +
- #endif
- diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
- index dfaa1f4dcb0c..d57dd06544a1 100644
- --- a/include/linux/jbd2.h
- +++ b/include/linux/jbd2.h
- @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
-
- static inline void jbd_lock_bh_state(struct buffer_head *bh)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(BH_State, &bh->b_state);
- +#else
- + spin_lock(&bh->b_state_lock);
- +#endif
- }
-
- static inline int jbd_trylock_bh_state(struct buffer_head *bh)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- return bit_spin_trylock(BH_State, &bh->b_state);
- +#else
- + return spin_trylock(&bh->b_state_lock);
- +#endif
- }
-
- static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- return bit_spin_is_locked(BH_State, &bh->b_state);
- +#else
- + return spin_is_locked(&bh->b_state_lock);
- +#endif
- }
-
- static inline void jbd_unlock_bh_state(struct buffer_head *bh)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_State, &bh->b_state);
- +#else
- + spin_unlock(&bh->b_state_lock);
- +#endif
- }
-
- static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(BH_JournalHead, &bh->b_state);
- +#else
- + spin_lock(&bh->b_journal_head_lock);
- +#endif
- }
-
- static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_unlock(BH_JournalHead, &bh->b_state);
- +#else
- + spin_unlock(&bh->b_journal_head_lock);
- +#endif
- }
-
- #define J_ASSERT(assert) BUG_ON(!(assert))
- diff --git a/include/linux/kdb.h b/include/linux/kdb.h
- index 410decacff8f..0861bebfc188 100644
- --- a/include/linux/kdb.h
- +++ b/include/linux/kdb.h
- @@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
- extern __printf(1, 2) int kdb_printf(const char *, ...);
- typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-
- +#define in_kdb_printk() (kdb_trap_printk)
- extern void kdb_init(int level);
-
- /* Access to kdb specific polling devices */
- @@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
- extern int kdb_unregister(char *);
- #else /* ! CONFIG_KGDB_KDB */
- static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
- +#define in_kdb_printk() (0)
- static inline void kdb_init(int level) {}
- static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
- diff --git a/include/linux/kernel.h b/include/linux/kernel.h
- index bc6ed52a39b9..7894d55e4998 100644
- --- a/include/linux/kernel.h
- +++ b/include/linux/kernel.h
- @@ -194,6 +194,9 @@ extern int _cond_resched(void);
- */
- # define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
- +
- +# define might_sleep_no_state_check() \
- + do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
- # define sched_annotate_sleep() (current->task_state_change = 0)
- #else
- static inline void ___might_sleep(const char *file, int line,
- @@ -201,6 +204,7 @@ extern int _cond_resched(void);
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
- # define might_sleep() do { might_resched(); } while (0)
- +# define might_sleep_no_state_check() do { might_resched(); } while (0)
- # define sched_annotate_sleep() do { } while (0)
- #endif
-
- @@ -488,6 +492,7 @@ extern enum system_states {
- SYSTEM_HALT,
- SYSTEM_POWER_OFF,
- SYSTEM_RESTART,
- + SYSTEM_SUSPEND,
- } system_state;
-
- #define TAINT_PROPRIETARY_MODULE 0
- diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
- index cb483305e1f5..4e5062316bb6 100644
- --- a/include/linux/list_bl.h
- +++ b/include/linux/list_bl.h
- @@ -2,6 +2,7 @@
- #define _LINUX_LIST_BL_H
-
- #include <linux/list.h>
- +#include <linux/spinlock.h>
- #include <linux/bit_spinlock.h>
-
- /*
- @@ -32,13 +33,24 @@
-
- struct hlist_bl_head {
- struct hlist_bl_node *first;
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + raw_spinlock_t lock;
- +#endif
- };
-
- struct hlist_bl_node {
- struct hlist_bl_node *next, **pprev;
- };
- -#define INIT_HLIST_BL_HEAD(ptr) \
- - ((ptr)->first = NULL)
- +
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +#define INIT_HLIST_BL_HEAD(h) \
- +do { \
- + (h)->first = NULL; \
- + raw_spin_lock_init(&(h)->lock); \
- +} while (0)
- +#else
- +#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
- +#endif
-
- static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
- {
- @@ -118,12 +130,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
-
- static inline void hlist_bl_lock(struct hlist_bl_head *b)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- bit_spin_lock(0, (unsigned long *)b);
- +#else
- + raw_spin_lock(&b->lock);
- +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- + __set_bit(0, (unsigned long *)b);
- +#endif
- +#endif
- }
-
- static inline void hlist_bl_unlock(struct hlist_bl_head *b)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- __bit_spin_unlock(0, (unsigned long *)b);
- +#else
- +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- + __clear_bit(0, (unsigned long *)b);
- +#endif
- + raw_spin_unlock(&b->lock);
- +#endif
- }
-
- static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
- diff --git a/include/linux/locallock.h b/include/linux/locallock.h
- new file mode 100644
- index 000000000000..280f884a05a3
- --- /dev/null
- +++ b/include/linux/locallock.h
- @@ -0,0 +1,287 @@
- +#ifndef _LINUX_LOCALLOCK_H
- +#define _LINUX_LOCALLOCK_H
- +
- +#include <linux/percpu.h>
- +#include <linux/spinlock.h>
- +
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +
- +#ifdef CONFIG_DEBUG_SPINLOCK
- +# define LL_WARN(cond) WARN_ON(cond)
- +#else
- +# define LL_WARN(cond) do { } while (0)
- +#endif
- +
- +/*
- + * per cpu lock based substitute for local_irq_*()
- + */
- +struct local_irq_lock {
- + spinlock_t lock;
- + struct task_struct *owner;
- + int nestcnt;
- + unsigned long flags;
- +};
- +
- +#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
- + DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
- + .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
- +
- +#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
- + DECLARE_PER_CPU(struct local_irq_lock, lvar)
- +
- +#define local_irq_lock_init(lvar) \
- + do { \
- + int __cpu; \
- + for_each_possible_cpu(__cpu) \
- + spin_lock_init(&per_cpu(lvar, __cpu).lock); \
- + } while (0)
- +
- +/*
- + * spin_lock|trylock|unlock_local flavour that does not migrate disable
- + * used for __local_lock|trylock|unlock where get_local_var/put_local_var
- + * already takes care of the migrate_disable/enable
- + * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
- + */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
- +# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
- +# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
- +#else
- +# define spin_lock_local(lock) spin_lock(lock)
- +# define spin_trylock_local(lock) spin_trylock(lock)
- +# define spin_unlock_local(lock) spin_unlock(lock)
- +#endif
- +
- +static inline void __local_lock(struct local_irq_lock *lv)
- +{
- + if (lv->owner != current) {
- + spin_lock_local(&lv->lock);
- + LL_WARN(lv->owner);
- + LL_WARN(lv->nestcnt);
- + lv->owner = current;
- + }
- + lv->nestcnt++;
- +}
- +
- +#define local_lock(lvar) \
- + do { __local_lock(&get_local_var(lvar)); } while (0)
- +
- +#define local_lock_on(lvar, cpu) \
- + do { __local_lock(&per_cpu(lvar, cpu)); } while (0)
- +
- +static inline int __local_trylock(struct local_irq_lock *lv)
- +{
- + if (lv->owner != current && spin_trylock_local(&lv->lock)) {
- + LL_WARN(lv->owner);
- + LL_WARN(lv->nestcnt);
- + lv->owner = current;
- + lv->nestcnt = 1;
- + return 1;
- + } else if (lv->owner == current) {
- + lv->nestcnt++;
- + return 1;
- + }
- + return 0;
- +}
- +
- +#define local_trylock(lvar) \
- + ({ \
- + int __locked; \
- + __locked = __local_trylock(&get_local_var(lvar)); \
- + if (!__locked) \
- + put_local_var(lvar); \
- + __locked; \
- + })
- +
- +static inline void __local_unlock(struct local_irq_lock *lv)
- +{
- + LL_WARN(lv->nestcnt == 0);
- + LL_WARN(lv->owner != current);
- + if (--lv->nestcnt)
- + return;
- +
- + lv->owner = NULL;
- + spin_unlock_local(&lv->lock);
- +}
- +
- +#define local_unlock(lvar) \
- + do { \
- + __local_unlock(this_cpu_ptr(&lvar)); \
- + put_local_var(lvar); \
- + } while (0)
- +
- +#define local_unlock_on(lvar, cpu) \
- + do { __local_unlock(&per_cpu(lvar, cpu)); } while (0)
- +
- +static inline void __local_lock_irq(struct local_irq_lock *lv)
- +{
- + spin_lock_irqsave(&lv->lock, lv->flags);
- + LL_WARN(lv->owner);
- + LL_WARN(lv->nestcnt);
- + lv->owner = current;
- + lv->nestcnt = 1;
- +}
- +
- +#define local_lock_irq(lvar) \
- + do { __local_lock_irq(&get_local_var(lvar)); } while (0)
- +
- +#define local_lock_irq_on(lvar, cpu) \
- + do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
- +
- +static inline void __local_unlock_irq(struct local_irq_lock *lv)
- +{
- + LL_WARN(!lv->nestcnt);
- + LL_WARN(lv->owner != current);
- + lv->owner = NULL;
- + lv->nestcnt = 0;
- + spin_unlock_irq(&lv->lock);
- +}
- +
- +#define local_unlock_irq(lvar) \
- + do { \
- + __local_unlock_irq(this_cpu_ptr(&lvar)); \
- + put_local_var(lvar); \
- + } while (0)
- +
- +#define local_unlock_irq_on(lvar, cpu) \
- + do { \
- + __local_unlock_irq(&per_cpu(lvar, cpu)); \
- + } while (0)
- +
- +static inline int __local_lock_irqsave(struct local_irq_lock *lv)
- +{
- + if (lv->owner != current) {
- + __local_lock_irq(lv);
- + return 0;
- + } else {
- + lv->nestcnt++;
- + return 1;
- + }
- +}
- +
- +#define local_lock_irqsave(lvar, _flags) \
- + do { \
- + if (__local_lock_irqsave(&get_local_var(lvar))) \
- + put_local_var(lvar); \
- + _flags = __this_cpu_read(lvar.flags); \
- + } while (0)
- +
- +#define local_lock_irqsave_on(lvar, _flags, cpu) \
- + do { \
- + __local_lock_irqsave(&per_cpu(lvar, cpu)); \
- + _flags = per_cpu(lvar, cpu).flags; \
- + } while (0)
- +
- +static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
- + unsigned long flags)
- +{
- + LL_WARN(!lv->nestcnt);
- + LL_WARN(lv->owner != current);
- + if (--lv->nestcnt)
- + return 0;
- +
- + lv->owner = NULL;
- + spin_unlock_irqrestore(&lv->lock, lv->flags);
- + return 1;
- +}
- +
- +#define local_unlock_irqrestore(lvar, flags) \
- + do { \
- + if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
- + put_local_var(lvar); \
- + } while (0)
- +
- +#define local_unlock_irqrestore_on(lvar, flags, cpu) \
- + do { \
- + __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
- + } while (0)
- +
- +#define local_spin_trylock_irq(lvar, lock) \
- + ({ \
- + int __locked; \
- + local_lock_irq(lvar); \
- + __locked = spin_trylock(lock); \
- + if (!__locked) \
- + local_unlock_irq(lvar); \
- + __locked; \
- + })
- +
- +#define local_spin_lock_irq(lvar, lock) \
- + do { \
- + local_lock_irq(lvar); \
- + spin_lock(lock); \
- + } while (0)
- +
- +#define local_spin_unlock_irq(lvar, lock) \
- + do { \
- + spin_unlock(lock); \
- + local_unlock_irq(lvar); \
- + } while (0)
- +
- +#define local_spin_lock_irqsave(lvar, lock, flags) \
- + do { \
- + local_lock_irqsave(lvar, flags); \
- + spin_lock(lock); \
- + } while (0)
- +
- +#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- + do { \
- + spin_unlock(lock); \
- + local_unlock_irqrestore(lvar, flags); \
- + } while (0)
- +
- +#define get_locked_var(lvar, var) \
- + (*({ \
- + local_lock(lvar); \
- + this_cpu_ptr(&var); \
- + }))
- +
- +#define put_locked_var(lvar, var) local_unlock(lvar);
- +
- +#define local_lock_cpu(lvar) \
- + ({ \
- + local_lock(lvar); \
- + smp_processor_id(); \
- + })
- +
- +#define local_unlock_cpu(lvar) local_unlock(lvar)
- +
- +#else /* PREEMPT_RT_BASE */
- +
- +#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
- +#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
- +
- +static inline void local_irq_lock_init(int lvar) { }
- +
- +#define local_trylock(lvar) \
- + ({ \
- + preempt_disable(); \
- + 1; \
- + })
- +
- +#define local_lock(lvar) preempt_disable()
- +#define local_unlock(lvar) preempt_enable()
- +#define local_lock_irq(lvar) local_irq_disable()
- +#define local_lock_irq_on(lvar, cpu) local_irq_disable()
- +#define local_unlock_irq(lvar) local_irq_enable()
- +#define local_unlock_irq_on(lvar, cpu) local_irq_enable()
- +#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
- +#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
- +
- +#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
- +#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
- +#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
- +#define local_spin_lock_irqsave(lvar, lock, flags) \
- + spin_lock_irqsave(lock, flags)
- +#define local_spin_unlock_irqrestore(lvar, lock, flags) \
- + spin_unlock_irqrestore(lock, flags)
- +
- +#define get_locked_var(lvar, var) get_cpu_var(var)
- +#define put_locked_var(lvar, var) put_cpu_var(var)
- +
- +#define local_lock_cpu(lvar) get_cpu()
- +#define local_unlock_cpu(lvar) put_cpu()
- +
- +#endif
- +
- +#endif
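-
- The netfilter hunk below (xt_write_lock) is one in-tree user of this new
- API. The general conversion pattern: per-CPU data previously protected by
- disabling interrupts or preemption gets a named local lock, which remains
- a plain preempt/irq toggle on !RT but becomes a per-CPU spinlock on RT.
- A sketch with a hypothetical per-CPU pool (struct my_obj assumed):
-
- 	static DEFINE_PER_CPU(struct list_head, my_pool);
- 	static DEFINE_LOCAL_IRQ_LOCK(my_pool_lock);
-
- 	static void pool_add(struct my_obj *obj)
- 	{
- 		unsigned long flags;
-
- 		local_lock_irqsave(my_pool_lock, flags);
- 		list_add(&obj->node, this_cpu_ptr(&my_pool));
- 		local_unlock_irqrestore(my_pool_lock, flags);
- 	}
-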
- diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
- index e8471c2ca83a..08bde1a7a987 100644
- --- a/include/linux/mm_types.h
- +++ b/include/linux/mm_types.h
- @@ -11,6 +11,7 @@
- #include <linux/completion.h>
- #include <linux/cpumask.h>
- #include <linux/uprobes.h>
- +#include <linux/rcupdate.h>
- #include <linux/page-flags-layout.h>
- #include <linux/workqueue.h>
- #include <asm/page.h>
- @@ -513,6 +514,9 @@ struct mm_struct {
- bool tlb_flush_batched;
- #endif
- struct uprobes_state uprobes_state;
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + struct rcu_head delayed_drop;
- +#endif
- #ifdef CONFIG_X86_INTEL_MPX
- /* address of the bounds directory */
- void __user *bd_addr;
- diff --git a/include/linux/module.h b/include/linux/module.h
- index 0c3207d26ac0..5944baaa3f28 100644
- --- a/include/linux/module.h
- +++ b/include/linux/module.h
- @@ -496,6 +496,7 @@ static inline int module_is_live(struct module *mod)
- struct module *__module_text_address(unsigned long addr);
- struct module *__module_address(unsigned long addr);
- bool is_module_address(unsigned long addr);
- +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
- bool is_module_percpu_address(unsigned long addr);
- bool is_module_text_address(unsigned long addr);
-
- @@ -663,6 +664,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
- return false;
- }
-
- +static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
- +{
- + return false;
- +}
- +
- static inline bool is_module_text_address(unsigned long addr)
- {
- return false;
- diff --git a/include/linux/mutex.h b/include/linux/mutex.h
- index 2cb7531e7d7a..b3fdfc820216 100644
- --- a/include/linux/mutex.h
- +++ b/include/linux/mutex.h
- @@ -19,6 +19,17 @@
- #include <asm/processor.h>
- #include <linux/osq_lock.h>
-
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- + , .dep_map = { .name = #lockname }
- +#else
- +# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
- +#endif
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# include <linux/mutex_rt.h>
- +#else
- +
- /*
- * Simple, straightforward mutexes with strict semantics:
- *
- @@ -99,13 +110,6 @@ do { \
- static inline void mutex_destroy(struct mutex *lock) {}
- #endif
-
- -#ifdef CONFIG_DEBUG_LOCK_ALLOC
- -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- - , .dep_map = { .name = #lockname }
- -#else
- -# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
- -#endif
- -
- #define __MUTEX_INITIALIZER(lockname) \
- { .count = ATOMIC_INIT(1) \
- , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
- @@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
- extern int mutex_trylock(struct mutex *lock);
- extern void mutex_unlock(struct mutex *lock);
-
- +#endif /* !PREEMPT_RT_FULL */
- +
- extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-
- #endif /* __LINUX_MUTEX_H */
- diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
- new file mode 100644
- index 000000000000..e0284edec655
- --- /dev/null
- +++ b/include/linux/mutex_rt.h
- @@ -0,0 +1,89 @@
- +#ifndef __LINUX_MUTEX_RT_H
- +#define __LINUX_MUTEX_RT_H
- +
- +#ifndef __LINUX_MUTEX_H
- +#error "Please include mutex.h"
- +#endif
- +
- +#include <linux/rtmutex.h>
- +
- +/* FIXME: Just for __lockfunc */
- +#include <linux/spinlock.h>
- +
- +struct mutex {
- + struct rt_mutex lock;
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + struct lockdep_map dep_map;
- +#endif
- +};
- +
- +#define __MUTEX_INITIALIZER(mutexname) \
- + { \
- + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
- + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
- + }
- +
- +#define DEFINE_MUTEX(mutexname) \
- + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
- +
- +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
- +extern void __lockfunc _mutex_lock(struct mutex *lock);
- +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
- +extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
- +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
- +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
- +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
- +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
- +extern int __lockfunc _mutex_trylock(struct mutex *lock);
- +extern void __lockfunc _mutex_unlock(struct mutex *lock);
- +
- +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
- +#define mutex_lock(l) _mutex_lock(l)
- +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
- +#define mutex_lock_killable(l) _mutex_lock_killable(l)
- +#define mutex_trylock(l) _mutex_trylock(l)
- +#define mutex_unlock(l) _mutex_unlock(l)
- +
- +#ifdef CONFIG_DEBUG_MUTEXES
- +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
- +#else
- +static inline void mutex_destroy(struct mutex *lock) {}
- +#endif
- +
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
- +# define mutex_lock_interruptible_nested(l, s) \
- + _mutex_lock_interruptible_nested(l, s)
- +# define mutex_lock_killable_nested(l, s) \
- + _mutex_lock_killable_nested(l, s)
- +
- +# define mutex_lock_nest_lock(lock, nest_lock) \
- +do { \
- + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
- + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
- +} while (0)
- +
- +#else
- +# define mutex_lock_nested(l, s) _mutex_lock(l)
- +# define mutex_lock_interruptible_nested(l, s) \
- + _mutex_lock_interruptible(l)
- +# define mutex_lock_killable_nested(l, s) \
- + _mutex_lock_killable(l)
- +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
- +#endif
- +
- +# define mutex_init(mutex) \
- +do { \
- + static struct lock_class_key __key; \
- + \
- + rt_mutex_init(&(mutex)->lock); \
- + __mutex_do_init((mutex), #mutex, &__key); \
- +} while (0)
- +
- +# define __mutex_init(mutex, name, key) \
- +do { \
- + rt_mutex_init(&(mutex)->lock); \
- + __mutex_do_init((mutex), name, key); \
- +} while (0)
- +
- +#endif
- diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
- index 47c7f5b8f675..85fc72b8a92b 100644
- --- a/include/linux/netdevice.h
- +++ b/include/linux/netdevice.h
- @@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handler_result_t;
- typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
-
- void __napi_schedule(struct napi_struct *n);
- +
- +/*
- + * When PREEMPT_RT_FULL is defined, all device interrupt handlers
- + * run as threads, and they can also be preempted (without PREEMPT_RT,
- + * interrupt threads cannot be preempted). This means a call to
- + * __napi_schedule_irqoff() from an interrupt handler can itself be
- + * preempted, which can corrupt the napi->poll_list.
- + */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +#define __napi_schedule_irqoff(n) __napi_schedule(n)
- +#else
- void __napi_schedule_irqoff(struct napi_struct *n);
- +#endif
-
- static inline bool napi_disable_pending(struct napi_struct *n)
- {
- @@ -2464,14 +2476,53 @@ void netdev_freemem(struct net_device *dev);
- void synchronize_net(void);
- int init_dummy_netdev(struct net_device *dev);
-
- -DECLARE_PER_CPU(int, xmit_recursion);
- #define XMIT_RECURSION_LIMIT 10
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static inline int dev_recursion_level(void)
- +{
- + return current->xmit_recursion;
- +}
- +
- +static inline int xmit_rec_read(void)
- +{
- + return current->xmit_recursion;
- +}
- +
- +static inline void xmit_rec_inc(void)
- +{
- + current->xmit_recursion++;
- +}
- +
- +static inline void xmit_rec_dec(void)
- +{
- + current->xmit_recursion--;
- +}
- +
- +#else
- +
- +DECLARE_PER_CPU(int, xmit_recursion);
-
- static inline int dev_recursion_level(void)
- {
- return this_cpu_read(xmit_recursion);
- }
-
- +static inline int xmit_rec_read(void)
- +{
- + return __this_cpu_read(xmit_recursion);
- +}
- +
- +static inline void xmit_rec_inc(void)
- +{
- + __this_cpu_inc(xmit_recursion);
- +}
- +
- +static inline void xmit_rec_dec(void)
- +{
- + __this_cpu_dec(xmit_recursion);
- +}
- +#endif
- +
- struct net_device *dev_get_by_index(struct net *net, int ifindex);
- struct net_device *__dev_get_by_index(struct net *net, int ifindex);
- struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
- @@ -2856,6 +2907,7 @@ struct softnet_data {
- unsigned int dropped;
- struct sk_buff_head input_pkt_queue;
- struct napi_struct backlog;
- + struct sk_buff_head tofree_queue;
-
- };
-
- diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
- index 2ad1a2b289b5..b4d10155af54 100644
- --- a/include/linux/netfilter/x_tables.h
- +++ b/include/linux/netfilter/x_tables.h
- @@ -4,6 +4,7 @@
-
- #include <linux/netdevice.h>
- #include <linux/static_key.h>
- +#include <linux/locallock.h>
- #include <uapi/linux/netfilter/x_tables.h>
-
- /* Test a struct->invflags and a boolean for inequality */
- @@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_info *info);
- */
- DECLARE_PER_CPU(seqcount_t, xt_recseq);
-
- +DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
- +
- /* xt_tee_enabled - true if x_tables needs to handle reentrancy
- *
- * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
- @@ -320,6 +323,9 @@ static inline unsigned int xt_write_recseq_begin(void)
- {
- unsigned int addend;
-
- + /* RT protection */
- + local_lock(xt_write_lock);
- +
- /*
- * Low order bit of sequence is set if we already
- * called xt_write_recseq_begin().
- @@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(unsigned int addend)
- /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
- smp_wmb();
- __this_cpu_add(xt_recseq.sequence, addend);
- + local_unlock(xt_write_lock);
- }
-
- /*
- diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
- index 810124b33327..d54ca43d571f 100644
- --- a/include/linux/nfs_fs.h
- +++ b/include/linux/nfs_fs.h
- @@ -165,7 +165,11 @@ struct nfs_inode {
-
- /* Readers: in-flight sillydelete RPC calls */
- /* Writers: rmdir */
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + struct semaphore rmdir_sem;
- +#else
- struct rw_semaphore rmdir_sem;
- +#endif
-
- #if IS_ENABLED(CONFIG_NFS_V4)
- struct nfs4_cached_acl *nfs4_acl;
- diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
- index 3bf867a0c3b3..71c6bdd14c8a 100644
- --- a/include/linux/nfs_xdr.h
- +++ b/include/linux/nfs_xdr.h
- @@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
- struct nfs_removeargs args;
- struct nfs_removeres res;
- struct dentry *dentry;
- - wait_queue_head_t wq;
- + struct swait_queue_head wq;
- struct rpc_cred *cred;
- struct nfs_fattr dir_attr;
- long timeout;
- diff --git a/include/linux/notifier.h b/include/linux/notifier.h
- index 4149868de4e6..babe5b9bcb91 100644
- --- a/include/linux/notifier.h
- +++ b/include/linux/notifier.h
- @@ -6,7 +6,7 @@
- *
- * Alan Cox <Alan.Cox@linux.org>
- */
- -
- +
- #ifndef _LINUX_NOTIFIER_H
- #define _LINUX_NOTIFIER_H
- #include <linux/errno.h>
- @@ -42,9 +42,7 @@
- * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
- * As compensation, srcu_notifier_chain_unregister() is rather expensive.
- * SRCU notifier chains should be used when the chain will be called very
- - * often but notifier_blocks will seldom be removed. Also, SRCU notifier
- - * chains are slightly more difficult to use because they require special
- - * runtime initialization.
- + * often but notifier_blocks will seldom be removed.
- */
-
- struct notifier_block;
- @@ -90,7 +88,7 @@ struct srcu_notifier_head {
- (name)->head = NULL; \
- } while (0)
-
- -/* srcu_notifier_heads must be initialized and cleaned up dynamically */
- +/* srcu_notifier_heads must be cleaned up dynamically */
- extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
- #define srcu_cleanup_notifier_head(name) \
- cleanup_srcu_struct(&(name)->srcu);
- @@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
- .head = NULL }
- #define RAW_NOTIFIER_INIT(name) { \
- .head = NULL }
- -/* srcu_notifier_heads cannot be initialized statically */
- +
- +#define SRCU_NOTIFIER_INIT(name, pcpu) \
- + { \
- + .mutex = __MUTEX_INITIALIZER(name.mutex), \
- + .head = NULL, \
- + .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
- + }
-
- #define ATOMIC_NOTIFIER_HEAD(name) \
- struct atomic_notifier_head name = \
- @@ -115,6 +119,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
- struct raw_notifier_head name = \
- RAW_NOTIFIER_INIT(name)
-
- +#define _SRCU_NOTIFIER_HEAD(name, mod) \
- + static DEFINE_PER_CPU(struct srcu_struct_array, \
- + name##_head_srcu_array); \
- + mod struct srcu_notifier_head name = \
- + SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
- +
- +#define SRCU_NOTIFIER_HEAD(name) \
- + _SRCU_NOTIFIER_HEAD(name, )
- +
- +#define SRCU_NOTIFIER_HEAD_STATIC(name) \
- + _SRCU_NOTIFIER_HEAD(name, static)
- +
- #ifdef __KERNEL__
-
- extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
- @@ -184,12 +200,12 @@ static inline int notifier_to_errno(int ret)
-
- /*
- * Declared notifiers so far. I can imagine quite a few more chains
- - * over time (eg laptop power reset chains, reboot chain (to clean
- + * over time (eg laptop power reset chains, reboot chain (to clean
- * device units up), device [un]mount chain, module load/unload chain,
- - * low memory chain, screenblank chain (for plug in modular screenblankers)
- + * low memory chain, screenblank chain (for plug in modular screenblankers)
- * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
- */
- -
- +
- /* CPU notfiers are defined in include/linux/cpu.h. */
-
- /* netdevice notifiers are defined in include/linux/netdevice.h */
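
With SRCU_NOTIFIER_HEAD()/SRCU_NOTIFIER_HEAD_STATIC() added above, an SRCU notifier chain no longer needs srcu_init_notifier_head() at runtime. A sketch with an illustrative chain and callback name:

    #include <linux/notifier.h>

    SRCU_NOTIFIER_HEAD_STATIC(my_chain);            /* illustrative */

    static int my_event(struct notifier_block *nb, unsigned long action,
                        void *data)
    {
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_event };

    static void example(void)
    {
            srcu_notifier_chain_register(&my_chain, &my_nb);
            srcu_notifier_call_chain(&my_chain, 0, NULL);
            srcu_notifier_chain_unregister(&my_chain, &my_nb);
    }
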
- diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
- index 5b2e6159b744..ea940f451606 100644
- --- a/include/linux/percpu-rwsem.h
- +++ b/include/linux/percpu-rwsem.h
- @@ -4,7 +4,7 @@
- #include <linux/atomic.h>
- #include <linux/rwsem.h>
- #include <linux/percpu.h>
- -#include <linux/wait.h>
- +#include <linux/swait.h>
- #include <linux/rcu_sync.h>
- #include <linux/lockdep.h>
-
- @@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
- struct rcu_sync rss;
- unsigned int __percpu *read_count;
- struct rw_semaphore rw_sem;
- - wait_queue_head_t writer;
- + struct swait_queue_head writer;
- int readers_block;
- };
-
- @@ -22,13 +22,13 @@ static struct percpu_rw_semaphore name = { \
- .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
- .read_count = &__percpu_rwsem_rc_##name, \
- .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
- - .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
- + .writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
- }
-
- extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
- extern void __percpu_up_read(struct percpu_rw_semaphore *);
-
- -static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
- +static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
- {
- might_sleep();
-
- @@ -46,16 +46,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
- __percpu_down_read(sem, false); /* Unconditional memory barrier */
- - barrier();
- /*
- - * The barrier() prevents the compiler from
- + * The preempt_enable() prevents the compiler from
- * bleeding the critical section out.
- */
- -}
- -
- -static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
- -{
- - percpu_down_read_preempt_disable(sem);
- preempt_enable();
- }
-
- @@ -82,13 +76,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
- return ret;
- }
-
- -static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
- +static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
- {
- - /*
- - * The barrier() prevents the compiler from
- - * bleeding the critical section out.
- - */
- - barrier();
- + preempt_disable();
- /*
- * Same as in percpu_down_read().
- */
- @@ -101,12 +91,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem
- rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
- }
-
- -static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
- -{
- - preempt_disable();
- - percpu_up_read_preempt_enable(sem);
- -}
- -
- extern void percpu_down_write(struct percpu_rw_semaphore *);
- extern void percpu_up_write(struct percpu_rw_semaphore *);
-
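
After this change percpu_down_read()/percpu_up_read() manage the preempt count themselves and the _preempt_disable/_preempt_enable variants disappear, so the read-side section is preemptible. A sketch of the surviving API, with an illustrative semaphore name:

    #include <linux/percpu-rwsem.h>

    DEFINE_STATIC_PERCPU_RWSEM(my_psem);            /* illustrative */

    static void read_side(void)
    {
            percpu_down_read(&my_psem);     /* may sleep; preemptible inside */
            /* ... read-side critical section ... */
            percpu_up_read(&my_psem);
    }
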
- diff --git a/include/linux/percpu.h b/include/linux/percpu.h
- index 56939d3f6e53..b988bf40ad3e 100644
- --- a/include/linux/percpu.h
- +++ b/include/linux/percpu.h
- @@ -18,6 +18,35 @@
- #define PERCPU_MODULE_RESERVE 0
- #endif
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +
- +#define get_local_var(var) (*({ \
- + migrate_disable(); \
- + this_cpu_ptr(&var); }))
- +
- +#define put_local_var(var) do { \
- + (void)&(var); \
- + migrate_enable(); \
- +} while (0)
- +
- +# define get_local_ptr(var) ({ \
- + migrate_disable(); \
- + this_cpu_ptr(var); })
- +
- +# define put_local_ptr(var) do { \
- + (void)(var); \
- + migrate_enable(); \
- +} while (0)
- +
- +#else
- +
- +#define get_local_var(var) get_cpu_var(var)
- +#define put_local_var(var) put_cpu_var(var)
- +#define get_local_ptr(var) get_cpu_ptr(var)
- +#define put_local_ptr(var) put_cpu_ptr(var)
- +
- +#endif
- +
- /* minimum unit size, also is the maximum supported allocation size */
- #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-
- @@ -110,6 +139,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
- #endif
-
- extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
- +extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
- extern bool is_kernel_percpu_address(unsigned long addr);
-
- #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
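
get_local_var()/get_local_ptr() give RT a migrate-disable based replacement for get_cpu_var()/get_cpu_ptr(): the task is pinned to its CPU but stays preemptible, so sleeping locks may be taken inside the section. A sketch with an invented per-CPU structure (its spinlock is assumed to be initialized at boot):

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct my_pool {                        /* hypothetical */
            spinlock_t lock;                /* assumed initialized */
            unsigned long hits;
    };
    static DEFINE_PER_CPU(struct my_pool, my_pool);

    static void pool_hit(void)
    {
            struct my_pool *p = get_local_ptr(&my_pool);

            /* Taking a sleeping spinlock here is legal on RT because
             * only migration, not preemption, is disabled. */
            spin_lock(&p->lock);
            p->hits++;
            spin_unlock(&p->lock);
            put_local_ptr(p);
    }
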
- diff --git a/include/linux/pid.h b/include/linux/pid.h
- index 97b745ddece5..01a5460a0c85 100644
- --- a/include/linux/pid.h
- +++ b/include/linux/pid.h
- @@ -2,6 +2,7 @@
- #define _LINUX_PID_H
-
- #include <linux/rcupdate.h>
- +#include <linux/atomic.h>
-
- enum pid_type
- {
- diff --git a/include/linux/preempt.h b/include/linux/preempt.h
- index 7eeceac52dea..f97c54265904 100644
- --- a/include/linux/preempt.h
- +++ b/include/linux/preempt.h
- @@ -50,7 +50,11 @@
- #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
- #define NMI_OFFSET (1UL << NMI_SHIFT)
-
- -#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
- +#else
- +# define SOFTIRQ_DISABLE_OFFSET (0)
- +#endif
-
- /* We use the MSB mostly because its available */
- #define PREEMPT_NEED_RESCHED 0x80000000
- @@ -59,9 +63,15 @@
- #include <asm/preempt.h>
-
- #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
- -#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
- #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
- +# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
- +#else
- +# define softirq_count() (0UL)
- +extern int in_serving_softirq(void);
- +#endif
-
- /*
- * Are we doing bottom half or hardware interrupt processing?
- @@ -79,7 +89,6 @@
- #define in_irq() (hardirq_count())
- #define in_softirq() (softirq_count())
- #define in_interrupt() (irq_count())
- -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
- #define in_nmi() (preempt_count() & NMI_MASK)
- #define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
- @@ -96,7 +105,11 @@
- /*
- * The preempt_count offset after spin_lock()
- */
- +#if !defined(CONFIG_PREEMPT_RT_FULL)
- #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
- +#else
- +#define PREEMPT_LOCK_OFFSET 0
- +#endif
-
- /*
- * The preempt_count offset needed for things like:
- @@ -145,6 +158,20 @@ extern void preempt_count_sub(int val);
- #define preempt_count_inc() preempt_count_add(1)
- #define preempt_count_dec() preempt_count_sub(1)
-
- +#ifdef CONFIG_PREEMPT_LAZY
- +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
- +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
- +#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
- +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
- +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
- +#else
- +#define add_preempt_lazy_count(val) do { } while (0)
- +#define sub_preempt_lazy_count(val) do { } while (0)
- +#define inc_preempt_lazy_count() do { } while (0)
- +#define dec_preempt_lazy_count() do { } while (0)
- +#define preempt_lazy_count() (0)
- +#endif
- +
- #ifdef CONFIG_PREEMPT_COUNT
-
- #define preempt_disable() \
- @@ -153,13 +180,25 @@ do { \
- barrier(); \
- } while (0)
-
- +#define preempt_lazy_disable() \
- +do { \
- + inc_preempt_lazy_count(); \
- + barrier(); \
- +} while (0)
- +
- #define sched_preempt_enable_no_resched() \
- do { \
- barrier(); \
- preempt_count_dec(); \
- } while (0)
-
- -#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
- +# define preempt_check_resched_rt() preempt_check_resched()
- +#else
- +# define preempt_enable_no_resched() preempt_enable()
- +# define preempt_check_resched_rt() barrier();
- +#endif
-
- #define preemptible() (preempt_count() == 0 && !irqs_disabled())
-
- @@ -184,6 +223,13 @@ do { \
- __preempt_schedule(); \
- } while (0)
-
- +#define preempt_lazy_enable() \
- +do { \
- + dec_preempt_lazy_count(); \
- + barrier(); \
- + preempt_check_resched(); \
- +} while (0)
- +
- #else /* !CONFIG_PREEMPT */
- #define preempt_enable() \
- do { \
- @@ -229,6 +275,7 @@ do { \
- #define preempt_disable_notrace() barrier()
- #define preempt_enable_no_resched_notrace() barrier()
- #define preempt_enable_notrace() barrier()
- +#define preempt_check_resched_rt() barrier()
- #define preemptible() 0
-
- #endif /* CONFIG_PREEMPT_COUNT */
- @@ -249,10 +296,31 @@ do { \
- } while (0)
- #define preempt_fold_need_resched() \
- do { \
- - if (tif_need_resched()) \
- + if (tif_need_resched_now()) \
- set_preempt_need_resched(); \
- } while (0)
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define preempt_disable_rt() preempt_disable()
- +# define preempt_enable_rt() preempt_enable()
- +# define preempt_disable_nort() barrier()
- +# define preempt_enable_nort() barrier()
- +# ifdef CONFIG_SMP
- + extern void migrate_disable(void);
- + extern void migrate_enable(void);
- +# else /* CONFIG_SMP */
- +# define migrate_disable() barrier()
- +# define migrate_enable() barrier()
- +# endif /* CONFIG_SMP */
- +#else
- +# define preempt_disable_rt() barrier()
- +# define preempt_enable_rt() barrier()
- +# define preempt_disable_nort() preempt_disable()
- +# define preempt_enable_nort() preempt_enable()
- +# define migrate_disable() preempt_disable()
- +# define migrate_enable() preempt_enable()
- +#endif
- +
- #ifdef CONFIG_PREEMPT_NOTIFIERS
-
- struct preempt_notifier;
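
The _nort/_rt helpers and migrate_disable()/migrate_enable() let shared code pick the right degree of protection per configuration. A sketch of the canonical migrate-disable pattern:

    #include <linux/preempt.h>
    #include <linux/smp.h>

    static void on_this_cpu(void)
    {
            int cpu;

            migrate_disable();      /* pinned to this CPU; on RT the
                                       task remains preemptible */
            cpu = smp_processor_id();
            /* ... per-CPU work that tolerates being preempted ... */
            migrate_enable();
    }
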
- diff --git a/include/linux/printk.h b/include/linux/printk.h
- index eac1af8502bb..37e647af0b0b 100644
- --- a/include/linux/printk.h
- +++ b/include/linux/printk.h
- @@ -126,9 +126,11 @@ struct va_format {
- #ifdef CONFIG_EARLY_PRINTK
- extern asmlinkage __printf(1, 2)
- void early_printk(const char *fmt, ...);
- +extern void printk_kill(void);
- #else
- static inline __printf(1, 2) __cold
- void early_printk(const char *s, ...) { }
- +static inline void printk_kill(void) { }
- #endif
-
- #ifdef CONFIG_PRINTK_NMI
- diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
- index af3581b8a451..277295039c8f 100644
- --- a/include/linux/radix-tree.h
- +++ b/include/linux/radix-tree.h
- @@ -292,6 +292,8 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
- int radix_tree_preload(gfp_t gfp_mask);
- int radix_tree_maybe_preload(gfp_t gfp_mask);
- int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
- +void radix_tree_preload_end(void);
- +
- void radix_tree_init(void);
- void *radix_tree_tag_set(struct radix_tree_root *root,
- unsigned long index, unsigned int tag);
- @@ -314,11 +316,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
- int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
- unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
-
- -static inline void radix_tree_preload_end(void)
- -{
- - preempt_enable();
- -}
- -
- /**
- * struct radix_tree_iter - radix tree iterator state
- *
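
radix_tree_preload_end() moves out of line because on RT it has to undo more than a bare preempt_enable(). The calling convention is unchanged; a sketch with an illustrative tree (real callers also hold the tree lock across the insert):

    #include <linux/gfp.h>
    #include <linux/radix-tree.h>

    static RADIX_TREE(my_tree, GFP_KERNEL);         /* illustrative */

    static int store(unsigned long index, void *item)
    {
            int err = radix_tree_preload(GFP_KERNEL);

            if (err)
                    return err;
            err = radix_tree_insert(&my_tree, index, item);
            radix_tree_preload_end();       /* out of line after this patch */
            return err;
    }
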
- diff --git a/include/linux/random.h b/include/linux/random.h
- index 16ab429735a7..9d0fecb5b6c2 100644
- --- a/include/linux/random.h
- +++ b/include/linux/random.h
- @@ -31,7 +31,7 @@ static inline void add_latent_entropy(void) {}
-
- extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
- -extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
- +extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
-
- extern void get_random_bytes(void *buf, int nbytes);
- extern int add_random_ready_callback(struct random_ready_callback *rdy);
- diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
- index e585018498d5..25c64474fc27 100644
- --- a/include/linux/rbtree.h
- +++ b/include/linux/rbtree.h
- @@ -31,7 +31,7 @@
-
- #include <linux/kernel.h>
- #include <linux/stddef.h>
- -#include <linux/rcupdate.h>
- +#include <linux/rcu_assign_pointer.h>
-
- struct rb_node {
- unsigned long __rb_parent_color;
- diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
- index d076183e49be..36bfb4dd57ae 100644
- --- a/include/linux/rbtree_augmented.h
- +++ b/include/linux/rbtree_augmented.h
- @@ -26,6 +26,7 @@
-
- #include <linux/compiler.h>
- #include <linux/rbtree.h>
- +#include <linux/rcupdate.h>
-
- /*
- * Please note - only struct rb_augment_callbacks and the prototypes for
- diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h
- new file mode 100644
- index 000000000000..7066962a4379
- --- /dev/null
- +++ b/include/linux/rcu_assign_pointer.h
- @@ -0,0 +1,54 @@
- +#ifndef __LINUX_RCU_ASSIGN_POINTER_H__
- +#define __LINUX_RCU_ASSIGN_POINTER_H__
- +#include <linux/compiler.h>
- +#include <asm/barrier.h>
- +
- +/**
- + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
- + * @v: The value to statically initialize with.
- + */
- +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
- +
- +/**
- + * rcu_assign_pointer() - assign to RCU-protected pointer
- + * @p: pointer to assign to
- + * @v: value to assign (publish)
- + *
- + * Assigns the specified value to the specified RCU-protected
- + * pointer, ensuring that any concurrent RCU readers will see
- + * any prior initialization.
- + *
- + * Inserts memory barriers on architectures that require them
- + * (which is most of them), and also prevents the compiler from
- + * reordering the code that initializes the structure after the pointer
- + * assignment. More importantly, this call documents which pointers
- + * will be dereferenced by RCU read-side code.
- + *
- + * In some special cases, you may use RCU_INIT_POINTER() instead
- + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- + * to the fact that it does not constrain either the CPU or the compiler.
- + * That said, using RCU_INIT_POINTER() when you should have used
- + * rcu_assign_pointer() is a very bad thing that results in
- + * impossible-to-diagnose memory corruption. So please be careful.
- + * See the RCU_INIT_POINTER() comment header for details.
- + *
- + * Note that rcu_assign_pointer() evaluates each of its arguments only
- + * once, appearances notwithstanding. One of the "extra" evaluations
- + * is in typeof() and the other visible only to sparse (__CHECKER__),
- + * neither of which actually execute the argument. As with most cpp
- + * macros, this execute-arguments-only-once property is important, so
- + * please be careful when making changes to rcu_assign_pointer() and the
- + * other macros that it invokes.
- + */
- +#define rcu_assign_pointer(p, v) \
- +({ \
- + uintptr_t _r_a_p__v = (uintptr_t)(v); \
- + \
- + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
- + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
- + else \
- + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
- + _r_a_p__v; \
- +})
- +
- +#endif
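
The new header only relocates RCU_INITIALIZER()/rcu_assign_pointer() so that rbtree.h can use them without pulling in all of rcupdate.h. Usage is unchanged; a sketch with an invented config struct:

    #include <linux/rcu_assign_pointer.h>
    #include <linux/slab.h>

    struct my_cfg { int val; };                     /* illustrative */
    static struct my_cfg __rcu *cur_cfg;

    static int publish_cfg(int val)
    {
            struct my_cfg *c = kmalloc(sizeof(*c), GFP_KERNEL);

            if (!c)
                    return -ENOMEM;
            c->val = val;
            /* Readers dereferencing cur_cfg under rcu_read_lock() now
             * see c fully initialized. */
            rcu_assign_pointer(cur_cfg, c);
            return 0;
    }
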
- diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
- index 01f71e1d2e94..30cc001d0d5a 100644
- --- a/include/linux/rcupdate.h
- +++ b/include/linux/rcupdate.h
- @@ -46,6 +46,7 @@
- #include <linux/compiler.h>
- #include <linux/ktime.h>
- #include <linux/irqflags.h>
- +#include <linux/rcu_assign_pointer.h>
-
- #include <asm/barrier.h>
-
- @@ -178,6 +179,9 @@ void call_rcu(struct rcu_head *head,
-
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +#define call_rcu_bh call_rcu
- +#else
- /**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- @@ -201,6 +205,7 @@ void call_rcu(struct rcu_head *head,
- */
- void call_rcu_bh(struct rcu_head *head,
- rcu_callback_t func);
- +#endif
-
- /**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- @@ -301,6 +306,11 @@ void synchronize_rcu(void);
- * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
- */
- #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +#define sched_rcu_preempt_depth() rcu_preempt_depth()
- +#else
- +static inline int sched_rcu_preempt_depth(void) { return 0; }
- +#endif
-
- #else /* #ifdef CONFIG_PREEMPT_RCU */
-
- @@ -326,6 +336,8 @@ static inline int rcu_preempt_depth(void)
- return 0;
- }
-
- +#define sched_rcu_preempt_depth() rcu_preempt_depth()
- +
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
- /* Internal to kernel */
- @@ -505,7 +517,14 @@ extern struct lockdep_map rcu_callback_map;
- int debug_lockdep_rcu_enabled(void);
-
- int rcu_read_lock_held(void);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static inline int rcu_read_lock_bh_held(void)
- +{
- + return rcu_read_lock_held();
- +}
- +#else
- int rcu_read_lock_bh_held(void);
- +#endif
-
- /**
- * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
- @@ -625,54 +644,6 @@ static inline void rcu_preempt_sleep_check(void)
- ((typeof(*p) __force __kernel *)(________p1)); \
- })
-
- -/**
- - * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
- - * @v: The value to statically initialize with.
- - */
- -#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
- -
- -/**
- - * rcu_assign_pointer() - assign to RCU-protected pointer
- - * @p: pointer to assign to
- - * @v: value to assign (publish)
- - *
- - * Assigns the specified value to the specified RCU-protected
- - * pointer, ensuring that any concurrent RCU readers will see
- - * any prior initialization.
- - *
- - * Inserts memory barriers on architectures that require them
- - * (which is most of them), and also prevents the compiler from
- - * reordering the code that initializes the structure after the pointer
- - * assignment. More importantly, this call documents which pointers
- - * will be dereferenced by RCU read-side code.
- - *
- - * In some special cases, you may use RCU_INIT_POINTER() instead
- - * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
- - * to the fact that it does not constrain either the CPU or the compiler.
- - * That said, using RCU_INIT_POINTER() when you should have used
- - * rcu_assign_pointer() is a very bad thing that results in
- - * impossible-to-diagnose memory corruption. So please be careful.
- - * See the RCU_INIT_POINTER() comment header for details.
- - *
- - * Note that rcu_assign_pointer() evaluates each of its arguments only
- - * once, appearances notwithstanding. One of the "extra" evaluations
- - * is in typeof() and the other visible only to sparse (__CHECKER__),
- - * neither of which actually execute the argument. As with most cpp
- - * macros, this execute-arguments-only-once property is important, so
- - * please be careful when making changes to rcu_assign_pointer() and the
- - * other macros that it invokes.
- - */
- -#define rcu_assign_pointer(p, v) \
- -({ \
- - uintptr_t _r_a_p__v = (uintptr_t)(v); \
- - \
- - if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
- - WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
- - else \
- - smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
- - _r_a_p__v; \
- -})
- -
- /**
- * rcu_access_pointer() - fetch RCU pointer with no dereferencing
- * @p: The pointer to read
- @@ -951,10 +922,14 @@ static inline void rcu_read_unlock(void)
- static inline void rcu_read_lock_bh(void)
- {
- local_bh_disable();
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + rcu_read_lock();
- +#else
- __acquire(RCU_BH);
- rcu_lock_acquire(&rcu_bh_lock_map);
- RCU_LOCKDEP_WARN(!rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
- +#endif
- }
-
- /*
- @@ -964,10 +939,14 @@ static inline void rcu_read_lock_bh(void)
- */
- static inline void rcu_read_unlock_bh(void)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + rcu_read_unlock();
- +#else
- RCU_LOCKDEP_WARN(!rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
- rcu_lock_release(&rcu_bh_lock_map);
- __release(RCU_BH);
- +#endif
- local_bh_enable();
- }
-
- diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
- index 63a4e4cf40a5..08ab12df2863 100644
- --- a/include/linux/rcutree.h
- +++ b/include/linux/rcutree.h
- @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu)
- rcu_note_context_switch();
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define synchronize_rcu_bh synchronize_rcu
- +#else
- void synchronize_rcu_bh(void);
- +#endif
- void synchronize_sched_expedited(void);
- void synchronize_rcu_expedited(void);
-
- @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void)
- }
-
- void rcu_barrier(void);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define rcu_barrier_bh rcu_barrier
- +#else
- void rcu_barrier_bh(void);
- +#endif
- void rcu_barrier_sched(void);
- unsigned long get_state_synchronize_rcu(void);
- void cond_synchronize_rcu(unsigned long oldstate);
- @@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned long oldstate);
- extern unsigned long rcutorture_testseq;
- extern unsigned long rcutorture_vernum;
- unsigned long rcu_batches_started(void);
- -unsigned long rcu_batches_started_bh(void);
- unsigned long rcu_batches_started_sched(void);
- unsigned long rcu_batches_completed(void);
- -unsigned long rcu_batches_completed_bh(void);
- unsigned long rcu_batches_completed_sched(void);
- unsigned long rcu_exp_batches_completed(void);
- unsigned long rcu_exp_batches_completed_sched(void);
- void show_rcu_gp_kthreads(void);
-
- void rcu_force_quiescent_state(void);
- -void rcu_bh_force_quiescent_state(void);
- void rcu_sched_force_quiescent_state(void);
-
- void rcu_idle_enter(void);
- @@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_mostly;
-
- bool rcu_is_watching(void);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +void rcu_bh_force_quiescent_state(void);
- +unsigned long rcu_batches_started_bh(void);
- +unsigned long rcu_batches_completed_bh(void);
- +#else
- +# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
- +# define rcu_batches_completed_bh rcu_batches_completed
- +# define rcu_batches_started_bh rcu_batches_completed
- +#endif
- +
- void rcu_all_qs(void);
-
- /* RCUtree hotplug events */
- diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
- index 1abba5ce2a2f..294a8b4875f1 100644
- --- a/include/linux/rtmutex.h
- +++ b/include/linux/rtmutex.h
- @@ -13,11 +13,15 @@
- #define __LINUX_RT_MUTEX_H
-
- #include <linux/linkage.h>
- +#include <linux/spinlock_types_raw.h>
- #include <linux/rbtree.h>
- -#include <linux/spinlock_types.h>
-
- extern int max_lock_depth; /* for sysctl */
-
- +#ifdef CONFIG_DEBUG_MUTEXES
- +#include <linux/debug_locks.h>
- +#endif
- +
- /**
- * The rt_mutex structure
- *
- @@ -31,8 +35,8 @@ struct rt_mutex {
- struct rb_root waiters;
- struct rb_node *waiters_leftmost;
- struct task_struct *owner;
- -#ifdef CONFIG_DEBUG_RT_MUTEXES
- int save_state;
- +#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *name, *file;
- int line;
- void *magic;
- @@ -55,22 +59,33 @@ struct hrtimer_sleeper;
- # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
- #endif
-
- +# define rt_mutex_init(mutex) \
- + do { \
- + raw_spin_lock_init(&(mutex)->wait_lock); \
- + __rt_mutex_init(mutex, #mutex); \
- + } while (0)
- +
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- , .name = #mutexname, .file = __FILE__, .line = __LINE__
- -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
- #else
- # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
- -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
- # define rt_mutex_debug_task_free(t) do { } while (0)
- #endif
-
- -#define __RT_MUTEX_INITIALIZER(mutexname) \
- - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- +#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT \
- , .owner = NULL \
- - __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
- + __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
- +
- +#define __RT_MUTEX_INITIALIZER(mutexname) \
- + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
- +
- +#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
- + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
- + , .save_state = 1 }
-
- #define DEFINE_RT_MUTEX(mutexname) \
- struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
- @@ -90,7 +105,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
- extern void rt_mutex_destroy(struct rt_mutex *lock);
-
- extern void rt_mutex_lock(struct rt_mutex *lock);
- +extern int rt_mutex_lock_state(struct rt_mutex *lock, int state);
- extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
- +extern int rt_mutex_lock_killable(struct rt_mutex *lock);
- extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
-
- diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
- new file mode 100644
- index 000000000000..49ed2d45d3be
- --- /dev/null
- +++ b/include/linux/rwlock_rt.h
- @@ -0,0 +1,99 @@
- +#ifndef __LINUX_RWLOCK_RT_H
- +#define __LINUX_RWLOCK_RT_H
- +
- +#ifndef __LINUX_SPINLOCK_H
- +#error Do not include directly. Use spinlock.h
- +#endif
- +
- +#define rwlock_init(rwl) \
- +do { \
- + static struct lock_class_key __key; \
- + \
- + rt_mutex_init(&(rwl)->lock); \
- + __rt_rwlock_init(rwl, #rwl, &__key); \
- +} while (0)
- +
- +extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
- +extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
- +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
- +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
- +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
- +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
- +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
- +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
- +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
- +extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
- +
- +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
- +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
- +
- +#define write_trylock_irqsave(lock, flags) \
- + __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
- +
- +#define read_lock_irqsave(lock, flags) \
- + do { \
- + typecheck(unsigned long, flags); \
- + flags = rt_read_lock_irqsave(lock); \
- + } while (0)
- +
- +#define write_lock_irqsave(lock, flags) \
- + do { \
- + typecheck(unsigned long, flags); \
- + flags = rt_write_lock_irqsave(lock); \
- + } while (0)
- +
- +#define read_lock(lock) rt_read_lock(lock)
- +
- +#define read_lock_bh(lock) \
- + do { \
- + local_bh_disable(); \
- + rt_read_lock(lock); \
- + } while (0)
- +
- +#define read_lock_irq(lock) read_lock(lock)
- +
- +#define write_lock(lock) rt_write_lock(lock)
- +
- +#define write_lock_bh(lock) \
- + do { \
- + local_bh_disable(); \
- + rt_write_lock(lock); \
- + } while (0)
- +
- +#define write_lock_irq(lock) write_lock(lock)
- +
- +#define read_unlock(lock) rt_read_unlock(lock)
- +
- +#define read_unlock_bh(lock) \
- + do { \
- + rt_read_unlock(lock); \
- + local_bh_enable(); \
- + } while (0)
- +
- +#define read_unlock_irq(lock) read_unlock(lock)
- +
- +#define write_unlock(lock) rt_write_unlock(lock)
- +
- +#define write_unlock_bh(lock) \
- + do { \
- + rt_write_unlock(lock); \
- + local_bh_enable(); \
- + } while (0)
- +
- +#define write_unlock_irq(lock) write_unlock(lock)
- +
- +#define read_unlock_irqrestore(lock, flags) \
- + do { \
- + typecheck(unsigned long, flags); \
- + (void) flags; \
- + rt_read_unlock(lock); \
- + } while (0)
- +
- +#define write_unlock_irqrestore(lock, flags) \
- + do { \
- + typecheck(unsigned long, flags); \
- + (void) flags; \
- + rt_write_unlock(lock); \
- + } while (0)
- +
- +#endif
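
On RT every rwlock_t operation maps onto the rtmutex-based rt_read_lock()/rt_write_lock(); notably the _irqsave variants no longer disable interrupts and carry flags only for API compatibility. Callers remain source-compatible; a sketch with an illustrative lock:

    #include <linux/spinlock.h>     /* pulls in rwlock_rt.h on RT */

    static DEFINE_RWLOCK(my_rwlock);        /* illustrative */

    static void read_path(void)
    {
            read_lock(&my_rwlock);  /* sleeping lock on RT; only
                                       single-reader recursion */
            /* ... */
            read_unlock(&my_rwlock);
    }

    static void write_path(void)
    {
            unsigned long flags;

            write_lock_irqsave(&my_rwlock, flags);  /* IRQs stay on (RT) */
            /* ... */
            write_unlock_irqrestore(&my_rwlock, flags);
    }
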
- diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
- index cc0072e93e36..5317cd957292 100644
- --- a/include/linux/rwlock_types.h
- +++ b/include/linux/rwlock_types.h
- @@ -1,6 +1,10 @@
- #ifndef __LINUX_RWLOCK_TYPES_H
- #define __LINUX_RWLOCK_TYPES_H
-
- +#if !defined(__LINUX_SPINLOCK_TYPES_H)
- +# error "Do not include directly, include spinlock_types.h"
- +#endif
- +
- /*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- * and initializers
- diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
- new file mode 100644
- index 000000000000..51b28d775fe1
- --- /dev/null
- +++ b/include/linux/rwlock_types_rt.h
- @@ -0,0 +1,33 @@
- +#ifndef __LINUX_RWLOCK_TYPES_RT_H
- +#define __LINUX_RWLOCK_TYPES_RT_H
- +
- +#ifndef __LINUX_SPINLOCK_TYPES_H
- +#error "Do not include directly. Include spinlock_types.h instead"
- +#endif
- +
- +/*
- + * rwlocks - rtmutex which allows single reader recursion
- + */
- +typedef struct {
- + struct rt_mutex lock;
- + int read_depth;
- + unsigned int break_lock;
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + struct lockdep_map dep_map;
- +#endif
- +} rwlock_t;
- +
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
- +#else
- +# define RW_DEP_MAP_INIT(lockname)
- +#endif
- +
- +#define __RW_LOCK_UNLOCKED(name) \
- + { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
- + RW_DEP_MAP_INIT(name) }
- +
- +#define DEFINE_RWLOCK(name) \
- + rwlock_t name = __RW_LOCK_UNLOCKED(name)
- +
- +#endif
- diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
- index dd1d14250340..aa2ac1f65c2d 100644
- --- a/include/linux/rwsem.h
- +++ b/include/linux/rwsem.h
- @@ -19,6 +19,10 @@
- #include <linux/osq_lock.h>
- #endif
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +#include <linux/rwsem_rt.h>
- +#else /* PREEMPT_RT_FULL */
- +
- struct rw_semaphore;
-
- #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
- @@ -106,6 +110,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
- return !list_empty(&sem->wait_list);
- }
-
- +#endif /* !PREEMPT_RT_FULL */
- +
- +/*
- + * The functions below are the same for all rwsem implementations including
- + * the RT specific variant.
- + */
- +
- /*
- * lock for reading
- */
- diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
- new file mode 100644
- index 000000000000..2ffbf093ae92
- --- /dev/null
- +++ b/include/linux/rwsem_rt.h
- @@ -0,0 +1,67 @@
- +#ifndef _LINUX_RWSEM_RT_H
- +#define _LINUX_RWSEM_RT_H
- +
- +#ifndef _LINUX_RWSEM_H
- +#error "Include rwsem.h"
- +#endif
- +
- +#include <linux/rtmutex.h>
- +#include <linux/swait.h>
- +
- +#define READER_BIAS (1U << 31)
- +#define WRITER_BIAS (1U << 30)
- +
- +struct rw_semaphore {
- + atomic_t readers;
- + struct rt_mutex rtmutex;
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + struct lockdep_map dep_map;
- +#endif
- +};
- +
- +#define __RWSEM_INITIALIZER(name) \
- +{ \
- + .readers = ATOMIC_INIT(READER_BIAS), \
- + .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \
- + RW_DEP_MAP_INIT(name) \
- +}
- +
- +#define DECLARE_RWSEM(lockname) \
- + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
- +
- +extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
- + struct lock_class_key *key);
- +
- +#define __init_rwsem(sem, name, key) \
- +do { \
- + rt_mutex_init(&(sem)->rtmutex); \
- + __rwsem_init((sem), (name), (key)); \
- +} while (0)
- +
- +#define init_rwsem(sem) \
- +do { \
- + static struct lock_class_key __key; \
- + \
- + __init_rwsem((sem), #sem, &__key); \
- +} while (0)
- +
- +static inline int rwsem_is_locked(struct rw_semaphore *sem)
- +{
- + return atomic_read(&sem->readers) != READER_BIAS;
- +}
- +
- +static inline int rwsem_is_contended(struct rw_semaphore *sem)
- +{
- + return atomic_read(&sem->readers) > 0;
- +}
- +
- +extern void __down_read(struct rw_semaphore *sem);
- +extern int __down_read_trylock(struct rw_semaphore *sem);
- +extern void __down_write(struct rw_semaphore *sem);
- +extern int __must_check __down_write_killable(struct rw_semaphore *sem);
- +extern int __down_write_trylock(struct rw_semaphore *sem);
- +extern void __up_read(struct rw_semaphore *sem);
- +extern void __up_write(struct rw_semaphore *sem);
- +extern void __downgrade_write(struct rw_semaphore *sem);
- +
- +#endif
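
The RT rw_semaphore replaces the generic implementation with an rtmutex plus an atomic reader count biased at READER_BIAS, while the shared down_read()/up_read() entry points in rwsem.h stay common to both variants. A sketch:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(my_sem);           /* illustrative */

    static void reader(void)
    {
            down_read(&my_sem);
            /* rwsem_is_locked() is true here: ->readers != READER_BIAS */
            up_read(&my_sem);
    }
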
- diff --git a/include/linux/sched.h b/include/linux/sched.h
- index a4d0afc009a7..e775696b480a 100644
- --- a/include/linux/sched.h
- +++ b/include/linux/sched.h
- @@ -26,6 +26,7 @@ struct sched_param {
- #include <linux/nodemask.h>
- #include <linux/mm_types.h>
- #include <linux/preempt.h>
- +#include <asm/kmap_types.h>
-
- #include <asm/page.h>
- #include <asm/ptrace.h>
- @@ -236,17 +237,13 @@ extern char ___assert_task_state[1 - 2*!!(
-
- /* Convenience macros for the sake of wake_up */
- #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
- -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
-
- /* get_task_state() */
- #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
- TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
-
- -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
- #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
- -#define task_is_stopped_or_traced(task) \
- - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
- #define task_contributes_to_load(task) \
- ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0 && \
- @@ -312,6 +309,11 @@ extern char ___assert_task_state[1 - 2*!!(
-
- #endif
-
- +#define __set_current_state_no_track(state_value) \
- + do { current->state = (state_value); } while (0)
- +#define set_current_state_no_track(state_value) \
- + set_mb(current->state, (state_value))
- +
- /* Task command name length */
- #define TASK_COMM_LEN 16
-
- @@ -1022,9 +1024,31 @@ struct wake_q_head {
- #define WAKE_Q(name) \
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
-
- -extern void wake_q_add(struct wake_q_head *head,
- - struct task_struct *task);
- -extern void wake_up_q(struct wake_q_head *head);
- +extern void __wake_q_add(struct wake_q_head *head,
- + struct task_struct *task, bool sleeper);
- +static inline void wake_q_add(struct wake_q_head *head,
- + struct task_struct *task)
- +{
- + __wake_q_add(head, task, false);
- +}
- +
- +static inline void wake_q_add_sleeper(struct wake_q_head *head,
- + struct task_struct *task)
- +{
- + __wake_q_add(head, task, true);
- +}
- +
- +extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
- +
- +static inline void wake_up_q(struct wake_q_head *head)
- +{
- + __wake_up_q(head, false);
- +}
- +
- +static inline void wake_up_q_sleeper(struct wake_q_head *head)
- +{
- + __wake_up_q(head, true);
- +}
-
- /*
- * sched-domains (multiprocessor balancing) declarations:
- @@ -1491,6 +1515,7 @@ struct task_struct {
- struct thread_info thread_info;
- #endif
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- + volatile long saved_state; /* saved state for "spinlock sleepers" */
- void *stack;
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
- @@ -1530,6 +1555,13 @@ struct task_struct {
- #endif
-
- unsigned int policy;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + int migrate_disable;
- + int migrate_disable_update;
- +# ifdef CONFIG_SCHED_DEBUG
- + int migrate_disable_atomic;
- +# endif
- +#endif
- int nr_cpus_allowed;
- cpumask_t cpus_allowed;
-
- @@ -1668,6 +1700,9 @@ struct task_struct {
-
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + struct task_struct *posix_timer_list;
- +#endif
-
- /* process credentials */
- const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
- @@ -1699,10 +1734,15 @@ struct task_struct {
- /* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
- + struct sigqueue *sigqueue_cache;
-
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
- struct sigpending pending;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + /* TODO: move me into ->restart_block ? */
- + struct siginfo forced_info;
- +#endif
-
- unsigned long sas_ss_sp;
- size_t sas_ss_size;
- @@ -1728,11 +1768,14 @@ struct task_struct {
- raw_spinlock_t pi_lock;
-
- struct wake_q_node wake_q;
- + struct wake_q_node wake_q_sleeper;
-
- #ifdef CONFIG_RT_MUTEXES
- /* PI waiters blocked on a rt_mutex held by this task */
- struct rb_root pi_waiters;
- struct rb_node *pi_waiters_leftmost;
- + /* Updated under owner's pi_lock and rq lock */
- + struct task_struct *pi_top_task;
- /* Deadlock detection and priority inheritance handling */
- struct rt_mutex_waiter *pi_blocked_on;
- #endif
- @@ -1931,6 +1974,12 @@ struct task_struct {
- /* bitmask and counter of trace recursion */
- unsigned long trace_recursion;
- #endif /* CONFIG_TRACING */
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + u64 preempt_timestamp_hist;
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + long timer_offset;
- +#endif
- +#endif
- #ifdef CONFIG_KCOV
- /* Coverage collection mode enabled for this task (0 if disabled). */
- enum kcov_mode kcov_mode;
- @@ -1956,8 +2005,22 @@ struct task_struct {
- unsigned int sequential_io;
- unsigned int sequential_io_avg;
- #endif
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + struct rcu_head put_rcu;
- + int softirq_nestcnt;
- + unsigned int softirqs_raised;
- +#endif
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
- + int kmap_idx;
- + pte_t kmap_pte[KM_TYPE_NR];
- +# endif
- +#endif
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- +#endif
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + int xmit_recursion;
- #endif
- int pagefault_disabled;
- #ifdef CONFIG_MMU
- @@ -1998,14 +2061,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
- }
- #endif
-
- -/* Future-safe accessor for struct task_struct's cpus_allowed. */
- -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
- -
- -static inline int tsk_nr_cpus_allowed(struct task_struct *p)
- -{
- - return p->nr_cpus_allowed;
- -}
- -
- #define TNF_MIGRATED 0x01
- #define TNF_NO_GROUP 0x02
- #define TNF_SHARED 0x04
- @@ -2225,6 +2280,15 @@ extern struct pid *cad_pid;
- extern void free_task(struct task_struct *tsk);
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +extern void __put_task_struct_cb(struct rcu_head *rhp);
- +
- +static inline void put_task_struct(struct task_struct *t)
- +{
- + if (atomic_dec_and_test(&t->usage))
- + call_rcu(&t->put_rcu, __put_task_struct_cb);
- +}
- +#else
- extern void __put_task_struct(struct task_struct *t);
-
- static inline void put_task_struct(struct task_struct *t)
- @@ -2232,6 +2296,7 @@ static inline void put_task_struct(struct task_struct *t)
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
- }
- +#endif
-
- struct task_struct *task_rcu_dereference(struct task_struct **ptask);
- struct task_struct *try_get_task_struct(struct task_struct **ptask);
- @@ -2273,6 +2338,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
- /*
- * Per process flags
- */
- +#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
- #define PF_EXITING 0x00000004 /* getting shut down */
- #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
- #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
- @@ -2441,6 +2507,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
-
- extern int set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask);
- +int migrate_me(void);
- +void tell_sched_cpu_down_begin(int cpu);
- +void tell_sched_cpu_down_done(int cpu);
- +
- #else
- static inline void do_set_cpus_allowed(struct task_struct *p,
- const struct cpumask *new_mask)
- @@ -2453,6 +2523,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
- return -EINVAL;
- return 0;
- }
- +static inline int migrate_me(void) { return 0; }
- +static inline void tell_sched_cpu_down_begin(int cpu) { }
- +static inline void tell_sched_cpu_down_done(int cpu) { }
- #endif
-
- #ifdef CONFIG_NO_HZ_COMMON
- @@ -2691,6 +2764,7 @@ extern void xtime_update(unsigned long ticks);
-
- extern int wake_up_state(struct task_struct *tsk, unsigned int state);
- extern int wake_up_process(struct task_struct *tsk);
- +extern int wake_up_lock_sleeper(struct task_struct * tsk);
- extern void wake_up_new_task(struct task_struct *tsk);
- #ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
- @@ -2899,6 +2973,17 @@ static inline void mmdrop(struct mm_struct *mm)
- __mmdrop(mm);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +extern void __mmdrop_delayed(struct rcu_head *rhp);
- +static inline void mmdrop_delayed(struct mm_struct *mm)
- +{
- + if (atomic_dec_and_test(&mm->mm_count))
- + call_rcu(&mm->delayed_drop, __mmdrop_delayed);
- +}
- +#else
- +# define mmdrop_delayed(mm) mmdrop(mm)
- +#endif
- +
- static inline void mmdrop_async_fn(struct work_struct *work)
- {
- struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
- @@ -3291,6 +3376,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
- }
-
- +#ifdef CONFIG_PREEMPT_LAZY
- +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
- +{
- + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
- +}
- +
- +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
- +{
- + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
- +}
- +
- +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
- +{
- + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
- +}
- +
- +static inline int need_resched_lazy(void)
- +{
- + return test_thread_flag(TIF_NEED_RESCHED_LAZY);
- +}
- +
- +static inline int need_resched_now(void)
- +{
- + return test_thread_flag(TIF_NEED_RESCHED);
- +}
- +
- +#else
- +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
- +static inline int need_resched_lazy(void) { return 0; }
- +
- +static inline int need_resched_now(void)
- +{
- + return test_thread_flag(TIF_NEED_RESCHED);
- +}
- +
- +#endif
- +
- static inline int restart_syscall(void)
- {
- set_tsk_thread_flag(current, TIF_SIGPENDING);
- @@ -3322,6 +3444,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
- return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
- }
-
- +static inline bool __task_is_stopped_or_traced(struct task_struct *task)
- +{
- + if (task->state & (__TASK_STOPPED | __TASK_TRACED))
- + return true;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
- + return true;
- +#endif
- + return false;
- +}
- +
- +static inline bool task_is_stopped_or_traced(struct task_struct *task)
- +{
- + bool traced_stopped;
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + unsigned long flags;
- +
- + raw_spin_lock_irqsave(&task->pi_lock, flags);
- + traced_stopped = __task_is_stopped_or_traced(task);
- + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- +#else
- + traced_stopped = __task_is_stopped_or_traced(task);
- +#endif
- + return traced_stopped;
- +}
- +
- +static inline bool task_is_traced(struct task_struct *task)
- +{
- + bool traced = false;
- +
- + if (task->state & __TASK_TRACED)
- + return true;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + /* in case the task is sleeping on tasklist_lock */
- + raw_spin_lock_irq(&task->pi_lock);
- + if (task->state & __TASK_TRACED)
- + traced = true;
- + else if (task->saved_state & __TASK_TRACED)
- + traced = true;
- + raw_spin_unlock_irq(&task->pi_lock);
- +#endif
- + return traced;
- +}
- +
- /*
- * cond_resched() and cond_resched_lock(): latency reduction via
- * explicit rescheduling in places that are safe. The return
- @@ -3347,12 +3514,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
- __cond_resched_lock(lock); \
- })
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- extern int __cond_resched_softirq(void);
-
- #define cond_resched_softirq() ({ \
- ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
- })
- +#else
- +# define cond_resched_softirq() cond_resched()
- +#endif
-
- static inline void cond_resched_rcu(void)
- {
- @@ -3527,6 +3698,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-
- #endif /* CONFIG_SMP */
-
- +static inline int __migrate_disabled(struct task_struct *p)
- +{
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + return p->migrate_disable;
- +#else
- + return 0;
- +#endif
- +}
- +
- +/* Future-safe accessor for struct task_struct's cpus_allowed. */
- +static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
- +{
- + if (__migrate_disabled(p))
- + return cpumask_of(task_cpu(p));
- +
- + return &p->cpus_allowed;
- +}
- +
- +static inline int tsk_nr_cpus_allowed(struct task_struct *p)
- +{
- + if (__migrate_disabled(p))
- + return 1;
- + return p->nr_cpus_allowed;
- +}
- +
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-
- diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
- index a30b172df6e1..db3e91f2bc03 100644
- --- a/include/linux/sched/rt.h
- +++ b/include/linux/sched/rt.h
- @@ -16,27 +16,20 @@ static inline int rt_task(struct task_struct *p)
- }
-
- #ifdef CONFIG_RT_MUTEXES
- -extern int rt_mutex_getprio(struct task_struct *p);
- -extern void rt_mutex_setprio(struct task_struct *p, int prio);
- -extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
- -extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
- +/*
- + * Must hold either p->pi_lock or task_rq(p)->lock.
- + */
- +static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
- +{
- + return p->pi_top_task;
- +}
- +extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
- extern void rt_mutex_adjust_pi(struct task_struct *p);
- static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
- {
- return tsk->pi_blocked_on != NULL;
- }
- #else
- -static inline int rt_mutex_getprio(struct task_struct *p)
- -{
- - return p->normal_prio;
- -}
- -
- -static inline int rt_mutex_get_effective_prio(struct task_struct *task,
- - int newprio)
- -{
- - return newprio;
- -}
- -
- static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
- {
- return NULL;
- diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
- index ead97654c4e9..3d7223ffdd3b 100644
- --- a/include/linux/seqlock.h
- +++ b/include/linux/seqlock.h
- @@ -220,20 +220,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
- return __read_seqcount_retry(s, start);
- }
-
- -
- -
- -static inline void raw_write_seqcount_begin(seqcount_t *s)
- +static inline void __raw_write_seqcount_begin(seqcount_t *s)
- {
- s->sequence++;
- smp_wmb();
- }
-
- -static inline void raw_write_seqcount_end(seqcount_t *s)
- +static inline void raw_write_seqcount_begin(seqcount_t *s)
- +{
- + preempt_disable_rt();
- + __raw_write_seqcount_begin(s);
- +}
- +
- +static inline void __raw_write_seqcount_end(seqcount_t *s)
- {
- smp_wmb();
- s->sequence++;
- }
-
- +static inline void raw_write_seqcount_end(seqcount_t *s)
- +{
- + __raw_write_seqcount_end(s);
- + preempt_enable_rt();
- +}
- +
- /**
- * raw_write_seqcount_barrier - do a seq write barrier
- * @s: pointer to seqcount_t
- @@ -428,10 +438,32 @@ typedef struct {
- /*
- * Read side functions for starting and finalizing a read side section.
- */
- +#ifndef CONFIG_PREEMPT_RT_FULL
- static inline unsigned read_seqbegin(const seqlock_t *sl)
- {
- return read_seqcount_begin(&sl->seqcount);
- }
- +#else
- +/*
- + * Starvation safe read side for RT
- + */
- +static inline unsigned read_seqbegin(seqlock_t *sl)
- +{
- + unsigned ret;
- +
- +repeat:
- + ret = ACCESS_ONCE(sl->seqcount.sequence);
- + if (unlikely(ret & 1)) {
- + /*
- +			 * Take the lock and let the writer proceed (i.e. possibly
- +			 * boost it), otherwise we could loop here forever.
- + */
- + spin_unlock_wait(&sl->lock);
- + goto repeat;
- + }
- + return ret;
- +}
- +#endif
-
- static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
- {
- @@ -446,36 +478,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
- static inline void write_seqlock(seqlock_t *sl)
- {
- spin_lock(&sl->lock);
- - write_seqcount_begin(&sl->seqcount);
- + __raw_write_seqcount_begin(&sl->seqcount);
- +}
- +
- +static inline int try_write_seqlock(seqlock_t *sl)
- +{
- + if (spin_trylock(&sl->lock)) {
- + __raw_write_seqcount_begin(&sl->seqcount);
- + return 1;
- + }
- + return 0;
- }
-
- static inline void write_sequnlock(seqlock_t *sl)
- {
- - write_seqcount_end(&sl->seqcount);
- + __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock(&sl->lock);
- }
-
- static inline void write_seqlock_bh(seqlock_t *sl)
- {
- spin_lock_bh(&sl->lock);
- - write_seqcount_begin(&sl->seqcount);
- + __raw_write_seqcount_begin(&sl->seqcount);
- }
-
- static inline void write_sequnlock_bh(seqlock_t *sl)
- {
- - write_seqcount_end(&sl->seqcount);
- + __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock_bh(&sl->lock);
- }
-
- static inline void write_seqlock_irq(seqlock_t *sl)
- {
- spin_lock_irq(&sl->lock);
- - write_seqcount_begin(&sl->seqcount);
- + __raw_write_seqcount_begin(&sl->seqcount);
- }
-
- static inline void write_sequnlock_irq(seqlock_t *sl)
- {
- - write_seqcount_end(&sl->seqcount);
- + __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock_irq(&sl->lock);
- }
-
- @@ -484,7 +525,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
- unsigned long flags;
-
- spin_lock_irqsave(&sl->lock, flags);
- - write_seqcount_begin(&sl->seqcount);
- + __raw_write_seqcount_begin(&sl->seqcount);
- return flags;
- }
-
- @@ -494,7 +535,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
- static inline void
- write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
- {
- - write_seqcount_end(&sl->seqcount);
- + __raw_write_seqcount_end(&sl->seqcount);
- spin_unlock_irqrestore(&sl->lock, flags);
- }
-
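
write_seqlock() now uses the __raw_ sequence helpers because the enclosing spinlock already provides writer serialization (and, on RT, preemption protection), while the RT read side waits on ->lock via spin_unlock_wait() so a preempted writer is boosted instead of starving the reader. The caller-visible pattern is unchanged; a sketch with invented names:

    #include <linux/seqlock.h>

    static DEFINE_SEQLOCK(my_sl);           /* illustrative */
    static u64 my_stamp;

    static void my_update(u64 t)
    {
            write_seqlock(&my_sl);  /* spin_lock + __raw_..._begin */
            my_stamp = t;
            write_sequnlock(&my_sl);
    }

    static u64 my_read(void)
    {
            unsigned seq;
            u64 t;

            do {
                    seq = read_seqbegin(&my_sl);    /* RT: blocks on
                                                       ->lock if a write
                                                       is in flight */
                    t = my_stamp;
            } while (read_seqretry(&my_sl, seq));
            return t;
    }
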
- diff --git a/include/linux/signal.h b/include/linux/signal.h
- index b63f63eaa39c..295540fdfc72 100644
- --- a/include/linux/signal.h
- +++ b/include/linux/signal.h
- @@ -233,6 +233,7 @@ static inline void init_sigpending(struct sigpending *sig)
- }
-
- extern void flush_sigqueue(struct sigpending *queue);
- +extern void flush_task_sigqueue(struct task_struct *tsk);
-
- /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
- static inline int valid_signal(unsigned long sig)
- diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
- index 601dfa849d30..dca387a8fa6b 100644
- --- a/include/linux/skbuff.h
- +++ b/include/linux/skbuff.h
- @@ -284,6 +284,7 @@ struct sk_buff_head {
-
- __u32 qlen;
- spinlock_t lock;
- + raw_spinlock_t raw_lock;
- };
-
- struct sk_buff;
- @@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
- __skb_queue_head_init(list);
- }
-
- +static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
- +{
- + raw_spin_lock_init(&list->raw_lock);
- + __skb_queue_head_init(list);
- +}
- +
- static inline void skb_queue_head_init_class(struct sk_buff_head *list,
- struct lock_class_key *class)
- {
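
The extra raw_lock lets queues touched from hard-IRQ context on RT avoid the now-sleeping ->lock. Such a queue is set up with skb_queue_head_init_raw() and driven through the caller-locked __skb_* helpers under the raw lock; a sketch:

    #include <linux/skbuff.h>

    static struct sk_buff_head irq_queue;   /* illustrative */

    static void queue_setup(void)
    {
            skb_queue_head_init_raw(&irq_queue);
    }

    static void enqueue_from_hardirq(struct sk_buff *skb)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&irq_queue.raw_lock, flags);
            __skb_queue_tail(&irq_queue, skb);  /* caller-locked helper */
            raw_spin_unlock_irqrestore(&irq_queue.raw_lock, flags);
    }
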
- diff --git a/include/linux/smp.h b/include/linux/smp.h
- index 8e0cb7a0f836..891c533724f5 100644
- --- a/include/linux/smp.h
- +++ b/include/linux/smp.h
- @@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus;
- extern void __init setup_nr_cpu_ids(void);
- extern void __init smp_init(void);
-
- +extern int __boot_cpu_id;
- +
- +static inline int get_boot_cpu_id(void)
- +{
- + return __boot_cpu_id;
- +}
- +
- #else /* !SMP */
-
- static inline void smp_send_stop(void) { }
- @@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); }
- static inline void smp_init(void) { }
- #endif
-
- +static inline int get_boot_cpu_id(void)
- +{
- + return 0;
- +}
- +
- #endif /* !SMP */
-
- /*
- @@ -185,6 +197,9 @@ static inline void smp_init(void) { }
- #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
- #define put_cpu() preempt_enable()
-
- +#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
- +#define put_cpu_light() migrate_enable()
- +
- /*
- * Callback to arch code if there's nosmp or maxcpus=0 on the
- * boot command line:
- diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
- index 47dd0cebd204..b241cc044bd3 100644
- --- a/include/linux/spinlock.h
- +++ b/include/linux/spinlock.h
- @@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
- #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
-
- /* Include rwlock functions */
- -#include <linux/rwlock.h>
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# include <linux/rwlock_rt.h>
- +#else
- +# include <linux/rwlock.h>
- +#endif
-
- /*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- @@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
- # include <linux/spinlock_api_up.h>
- #endif
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# include <linux/spinlock_rt.h>
- +#else /* PREEMPT_RT_FULL */
- +
- /*
- * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
- */
- @@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
- #define atomic_dec_and_lock(atomic, lock) \
- __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-
- +#endif /* !PREEMPT_RT_FULL */
- +
- #endif /* __LINUX_SPINLOCK_H */
- diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
- index 5344268e6e62..043263f30e81 100644
- --- a/include/linux/spinlock_api_smp.h
- +++ b/include/linux/spinlock_api_smp.h
- @@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
- return 0;
- }
-
- -#include <linux/rwlock_api_smp.h>
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +# include <linux/rwlock_api_smp.h>
- +#endif
-
- #endif /* __LINUX_SPINLOCK_API_SMP_H */
- diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
- new file mode 100644
- index 000000000000..43ca841b913a
- --- /dev/null
- +++ b/include/linux/spinlock_rt.h
- @@ -0,0 +1,162 @@
- +#ifndef __LINUX_SPINLOCK_RT_H
- +#define __LINUX_SPINLOCK_RT_H
- +
- +#ifndef __LINUX_SPINLOCK_H
- +#error Do not include directly. Use spinlock.h
- +#endif
- +
- +#include <linux/bug.h>
- +
- +extern void
- +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
- +
- +#define spin_lock_init(slock) \
- +do { \
- + static struct lock_class_key __key; \
- + \
- + rt_mutex_init(&(slock)->lock); \
- + __rt_spin_lock_init(slock, #slock, &__key); \
- +} while (0)
- +
- +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
- +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
- +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
- +
- +extern void __lockfunc rt_spin_lock(spinlock_t *lock);
- +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
- +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
- +extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
- +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
- +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
- +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
- +extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
- +extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
- +
- +/*
- + * lockdep-less calls, for derived types like rwlock:
- + * (for trylock they can use rt_mutex_trylock() directly.
- + */
- +extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
- +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
- +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
- +
- +#define spin_lock(lock) rt_spin_lock(lock)
- +
- +#define spin_lock_bh(lock) \
- + do { \
- + local_bh_disable(); \
- + rt_spin_lock(lock); \
- + } while (0)
- +
- +#define spin_lock_irq(lock) spin_lock(lock)
- +
- +#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
- +
- +#define spin_trylock(lock) \
- +({ \
- + int __locked; \
- + __locked = spin_do_trylock(lock); \
- + __locked; \
- +})
- +
- +#ifdef CONFIG_LOCKDEP
- +# define spin_lock_nested(lock, subclass) \
- + do { \
- + rt_spin_lock_nested(lock, subclass); \
- + } while (0)
- +
- +#define spin_lock_bh_nested(lock, subclass) \
- + do { \
- + local_bh_disable(); \
- + rt_spin_lock_nested(lock, subclass); \
- + } while (0)
- +
- +# define spin_lock_irqsave_nested(lock, flags, subclass) \
- + do { \
- + typecheck(unsigned long, flags); \
- + flags = 0; \
- + rt_spin_lock_nested(lock, subclass); \
- + } while (0)
- +#else
- +# define spin_lock_nested(lock, subclass) spin_lock(lock)
- +# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock)
- +
- +# define spin_lock_irqsave_nested(lock, flags, subclass) \
- + do { \
- + typecheck(unsigned long, flags); \
- + flags = 0; \
- + spin_lock(lock); \
- + } while (0)
- +#endif
- +
- +#define spin_lock_irqsave(lock, flags) \
- + do { \
- + typecheck(unsigned long, flags); \
- + flags = 0; \
- + spin_lock(lock); \
- + } while (0)
- +
- +static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
- +{
- + unsigned long flags = 0;
- +#ifdef CONFIG_TRACE_IRQFLAGS
- + flags = rt_spin_lock_trace_flags(lock);
- +#else
- + spin_lock(lock); /* lock_local */
- +#endif
- + return flags;
- +}
- +
- +/* FIXME: we need rt_spin_lock_nest_lock */
- +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
- +
- +#define spin_unlock(lock) rt_spin_unlock(lock)
- +
- +#define spin_unlock_bh(lock) \
- + do { \
- + rt_spin_unlock(lock); \
- + local_bh_enable(); \
- + } while (0)
- +
- +#define spin_unlock_irq(lock) spin_unlock(lock)
- +
- +#define spin_unlock_irqrestore(lock, flags) \
- + do { \
- + typecheck(unsigned long, flags); \
- + (void) flags; \
- + spin_unlock(lock); \
- + } while (0)
- +
- +#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
- +#define spin_trylock_irq(lock) spin_trylock(lock)
- +
- +#define spin_trylock_irqsave(lock, flags) \
- + rt_spin_trylock_irqsave(lock, &(flags))
- +
- +#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
- +
- +#ifdef CONFIG_GENERIC_LOCKBREAK
- +# define spin_is_contended(lock) ((lock)->break_lock)
- +#else
- +# define spin_is_contended(lock) (((void)(lock), 0))
- +#endif
- +
- +static inline int spin_can_lock(spinlock_t *lock)
- +{
- + return !rt_mutex_is_locked(&lock->lock);
- +}
- +
- +static inline int spin_is_locked(spinlock_t *lock)
- +{
- + return rt_mutex_is_locked(&lock->lock);
- +}
- +
- +static inline void assert_spin_locked(spinlock_t *lock)
- +{
- + BUG_ON(!spin_is_locked(lock));
- +}
- +
- +#define atomic_dec_and_lock(atomic, lock) \
- + atomic_dec_and_spin_lock(atomic, lock)
- +
- +#endif
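-
- Taken together these macros keep every spin_* call site intact while
- substituting an rtmutex underneath; note that spin_lock_irqsave() only
- zeroes flags and never disables interrupts, because the lock may sleep.
- A compilable userspace analogy (build with cc -pthread; all demo_*
- names are invented for illustration):
-
- 	#include <pthread.h>
- 	#include <stdio.h>
-
- 	/* Model: a sleeping lock hiding behind the spin_* naming. */
- 	typedef struct { pthread_mutex_t lock; } demo_spinlock_t;
-
- 	static void demo_spin_lock(demo_spinlock_t *l)
- 	{
- 		pthread_mutex_lock(&l->lock);	/* may block, like rt_spin_lock() */
- 	}
-
- 	static void demo_spin_unlock(demo_spinlock_t *l)
- 	{
- 		pthread_mutex_unlock(&l->lock);
- 	}
-
- 	/* API-compatible irqsave: the "flags" are dead weight, as above. */
- 	static unsigned long demo_spin_lock_irqsave(demo_spinlock_t *l)
- 	{
- 		demo_spin_lock(l);
- 		return 0;			/* interrupts are NOT disabled */
- 	}
-
- 	int main(void)
- 	{
- 		demo_spinlock_t l = { PTHREAD_MUTEX_INITIALIZER };
- 		unsigned long flags = demo_spin_lock_irqsave(&l);
-
- 		printf("flags=%lu (always 0 here)\n", flags);
- 		demo_spin_unlock(&l);
- 		return 0;
- 	}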
- diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
- index 73548eb13a5d..10bac715ea96 100644
- --- a/include/linux/spinlock_types.h
- +++ b/include/linux/spinlock_types.h
- @@ -9,80 +9,15 @@
- * Released under the General Public License (GPL).
- */
-
- -#if defined(CONFIG_SMP)
- -# include <asm/spinlock_types.h>
- -#else
- -# include <linux/spinlock_types_up.h>
- -#endif
- -
- -#include <linux/lockdep.h>
- -
- -typedef struct raw_spinlock {
- - arch_spinlock_t raw_lock;
- -#ifdef CONFIG_GENERIC_LOCKBREAK
- - unsigned int break_lock;
- -#endif
- -#ifdef CONFIG_DEBUG_SPINLOCK
- - unsigned int magic, owner_cpu;
- - void *owner;
- -#endif
- -#ifdef CONFIG_DEBUG_LOCK_ALLOC
- - struct lockdep_map dep_map;
- -#endif
- -} raw_spinlock_t;
- -
- -#define SPINLOCK_MAGIC 0xdead4ead
- -
- -#define SPINLOCK_OWNER_INIT ((void *)-1L)
- -
- -#ifdef CONFIG_DEBUG_LOCK_ALLOC
- -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
- -#else
- -# define SPIN_DEP_MAP_INIT(lockname)
- -#endif
- +#include <linux/spinlock_types_raw.h>
-
- -#ifdef CONFIG_DEBUG_SPINLOCK
- -# define SPIN_DEBUG_INIT(lockname) \
- - .magic = SPINLOCK_MAGIC, \
- - .owner_cpu = -1, \
- - .owner = SPINLOCK_OWNER_INIT,
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +# include <linux/spinlock_types_nort.h>
- +# include <linux/rwlock_types.h>
- #else
- -# define SPIN_DEBUG_INIT(lockname)
- +# include <linux/rtmutex.h>
- +# include <linux/spinlock_types_rt.h>
- +# include <linux/rwlock_types_rt.h>
- #endif
-
- -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- - { \
- - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- - SPIN_DEBUG_INIT(lockname) \
- - SPIN_DEP_MAP_INIT(lockname) }
- -
- -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
- -
- -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
- -
- -typedef struct spinlock {
- - union {
- - struct raw_spinlock rlock;
- -
- -#ifdef CONFIG_DEBUG_LOCK_ALLOC
- -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- - struct {
- - u8 __padding[LOCK_PADSIZE];
- - struct lockdep_map dep_map;
- - };
- -#endif
- - };
- -} spinlock_t;
- -
- -#define __SPIN_LOCK_INITIALIZER(lockname) \
- - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
- -
- -#define __SPIN_LOCK_UNLOCKED(lockname) \
- - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
- -
- -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
- -
- -#include <linux/rwlock_types.h>
- -
- #endif /* __LINUX_SPINLOCK_TYPES_H */
- diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
- new file mode 100644
- index 000000000000..f1dac1fb1d6a
- --- /dev/null
- +++ b/include/linux/spinlock_types_nort.h
- @@ -0,0 +1,33 @@
- +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
- +#define __LINUX_SPINLOCK_TYPES_NORT_H
- +
- +#ifndef __LINUX_SPINLOCK_TYPES_H
- +#error "Do not include directly. Include spinlock_types.h instead"
- +#endif
- +
- +/*
- + * The non-RT version maps spinlocks to raw_spinlocks
- + */
- +typedef struct spinlock {
- + union {
- + struct raw_spinlock rlock;
- +
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
- + struct {
- + u8 __padding[LOCK_PADSIZE];
- + struct lockdep_map dep_map;
- + };
- +#endif
- + };
- +} spinlock_t;
- +
- +#define __SPIN_LOCK_INITIALIZER(lockname) \
- + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
- +
- +#define __SPIN_LOCK_UNLOCKED(lockname) \
- + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
- +
- +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
- +
- +#endif
- diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
- new file mode 100644
- index 000000000000..edffc4d53fc9
- --- /dev/null
- +++ b/include/linux/spinlock_types_raw.h
- @@ -0,0 +1,56 @@
- +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
- +#define __LINUX_SPINLOCK_TYPES_RAW_H
- +
- +#if defined(CONFIG_SMP)
- +# include <asm/spinlock_types.h>
- +#else
- +# include <linux/spinlock_types_up.h>
- +#endif
- +
- +#include <linux/lockdep.h>
- +
- +typedef struct raw_spinlock {
- + arch_spinlock_t raw_lock;
- +#ifdef CONFIG_GENERIC_LOCKBREAK
- + unsigned int break_lock;
- +#endif
- +#ifdef CONFIG_DEBUG_SPINLOCK
- + unsigned int magic, owner_cpu;
- + void *owner;
- +#endif
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + struct lockdep_map dep_map;
- +#endif
- +} raw_spinlock_t;
- +
- +#define SPINLOCK_MAGIC 0xdead4ead
- +
- +#define SPINLOCK_OWNER_INIT ((void *)-1L)
- +
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
- +#else
- +# define SPIN_DEP_MAP_INIT(lockname)
- +#endif
- +
- +#ifdef CONFIG_DEBUG_SPINLOCK
- +# define SPIN_DEBUG_INIT(lockname) \
- + .magic = SPINLOCK_MAGIC, \
- + .owner_cpu = -1, \
- + .owner = SPINLOCK_OWNER_INIT,
- +#else
- +# define SPIN_DEBUG_INIT(lockname)
- +#endif
- +
- +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- + { \
- + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- + SPIN_DEBUG_INIT(lockname) \
- + SPIN_DEP_MAP_INIT(lockname) }
- +
- +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
- +
- +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
- +
- +#endif
- diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
- new file mode 100644
- index 000000000000..3e3d8c5f7a9a
- --- /dev/null
- +++ b/include/linux/spinlock_types_rt.h
- @@ -0,0 +1,48 @@
- +#ifndef __LINUX_SPINLOCK_TYPES_RT_H
- +#define __LINUX_SPINLOCK_TYPES_RT_H
- +
- +#ifndef __LINUX_SPINLOCK_TYPES_H
- +#error "Do not include directly. Include spinlock_types.h instead"
- +#endif
- +
- +#include <linux/cache.h>
- +
- +/*
- + * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
- + */
- +typedef struct spinlock {
- + struct rt_mutex lock;
- + unsigned int break_lock;
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + struct lockdep_map dep_map;
- +#endif
- +} spinlock_t;
- +
- +#ifdef CONFIG_DEBUG_RT_MUTEXES
- +# define __RT_SPIN_INITIALIZER(name) \
- + { \
- + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- + .save_state = 1, \
- + .file = __FILE__, \
- + .line = __LINE__ , \
- + }
- +#else
- +# define __RT_SPIN_INITIALIZER(name) \
- + { \
- + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
- + .save_state = 1, \
- + }
- +#endif
- +
- +/*
- +.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
- +*/
- +
- +#define __SPIN_LOCK_UNLOCKED(name) \
- + { .lock = __RT_SPIN_INITIALIZER(name.lock), \
- + SPIN_DEP_MAP_INIT(name) }
- +
- +#define DEFINE_SPINLOCK(name) \
- + spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
- +
- +#endif
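-
- The net effect of the header split: the same DEFINE_SPINLOCK(foo) now
- expands differently per configuration. A sketch of both expansions,
- eliding the optional debug and lockdep fields:
-
- 	/* !PREEMPT_RT_FULL (spinlock_types_nort.h): a raw arch lock */
- 	spinlock_t foo = (spinlock_t) { {
- 		.rlock = { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED } } };
-
- 	/* PREEMPT_RT_FULL (spinlock_types_rt.h): an rtmutex */
- 	spinlock_t foo = {
- 		.lock = { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(foo.lock.wait_lock),
- 			  .save_state = 1 } };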
- diff --git a/include/linux/srcu.h b/include/linux/srcu.h
- index dc8eb63c6568..e793d3a257da 100644
- --- a/include/linux/srcu.h
- +++ b/include/linux/srcu.h
- @@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp);
-
- void process_srcu(struct work_struct *work);
-
- -#define __SRCU_STRUCT_INIT(name) \
- +#define __SRCU_STRUCT_INIT(name, pcpu_name) \
- { \
- .completed = -300, \
- - .per_cpu_ref = &name##_srcu_array, \
- + .per_cpu_ref = &pcpu_name, \
- .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
- .running = false, \
- .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
- @@ -119,7 +119,7 @@ void process_srcu(struct work_struct *work);
- */
- #define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- - is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
- + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
- #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
- #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-
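- With the extra parameter the per-cpu array is named explicitly instead
- of being pasted inside __SRCU_STRUCT_INIT(). What DEFINE_SRCU(my_srcu)
- now produces, sketched with an arbitrary example name:
-
- 	static DEFINE_PER_CPU(struct srcu_struct_array, my_srcu_srcu_array);
- 	struct srcu_struct my_srcu =
- 		__SRCU_STRUCT_INIT(my_srcu, my_srcu_srcu_array);
-
- .per_cpu_ref still ends up as &my_srcu_srcu_array, but the initializer
- can now also be used with a per-cpu array that follows a different
- naming scheme.
-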
- diff --git a/include/linux/suspend.h b/include/linux/suspend.h
- index d9718378a8be..e81e6dc7dcb1 100644
- --- a/include/linux/suspend.h
- +++ b/include/linux/suspend.h
- @@ -193,6 +193,12 @@ struct platform_freeze_ops {
- void (*end)(void);
- };
-
- +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
- +extern bool pm_in_action;
- +#else
- +# define pm_in_action false
- +#endif
- +
- #ifdef CONFIG_SUSPEND
- /**
- * suspend_set_ops - set platform dependent suspend operations
- diff --git a/include/linux/swait.h b/include/linux/swait.h
- index c1f9c62a8a50..83f004a72320 100644
- --- a/include/linux/swait.h
- +++ b/include/linux/swait.h
- @@ -87,6 +87,7 @@ static inline int swait_active(struct swait_queue_head *q)
- extern void swake_up(struct swait_queue_head *q);
- extern void swake_up_all(struct swait_queue_head *q);
- extern void swake_up_locked(struct swait_queue_head *q);
- +extern void swake_up_all_locked(struct swait_queue_head *q);
-
- extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
- extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
- diff --git a/include/linux/swap.h b/include/linux/swap.h
- index 55ff5593c193..52bf5477dc92 100644
- --- a/include/linux/swap.h
- +++ b/include/linux/swap.h
- @@ -11,6 +11,7 @@
- #include <linux/fs.h>
- #include <linux/atomic.h>
- #include <linux/page-flags.h>
- +#include <linux/locallock.h>
- #include <asm/page.h>
-
- struct notifier_block;
- @@ -247,7 +248,8 @@ struct swap_info_struct {
- void *workingset_eviction(struct address_space *mapping, struct page *page);
- bool workingset_refault(void *shadow);
- void workingset_activation(struct page *page);
- -extern struct list_lru workingset_shadow_nodes;
- +extern struct list_lru __workingset_shadow_nodes;
- +DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
-
- static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
- {
- @@ -292,6 +294,7 @@ extern unsigned long nr_free_pagecache_pages(void);
-
-
- /* linux/mm/swap.c */
- +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock);
- extern void lru_cache_add(struct page *);
- extern void lru_cache_add_anon(struct page *page);
- extern void lru_cache_add_file(struct page *page);
- diff --git a/include/linux/swork.h b/include/linux/swork.h
- new file mode 100644
- index 000000000000..f175fa9a6016
- --- /dev/null
- +++ b/include/linux/swork.h
- @@ -0,0 +1,24 @@
- +#ifndef _LINUX_SWORK_H
- +#define _LINUX_SWORK_H
- +
- +#include <linux/list.h>
- +
- +struct swork_event {
- + struct list_head item;
- + unsigned long flags;
- + void (*func)(struct swork_event *);
- +};
- +
- +static inline void INIT_SWORK(struct swork_event *event,
- + void (*func)(struct swork_event *))
- +{
- + event->flags = 0;
- + event->func = func;
- +}
- +
- +bool swork_queue(struct swork_event *sev);
- +
- +int swork_get(void);
- +void swork_put(void);
- +
- +#endif /* _LINUX_SWORK_H */
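-
- The kernel/cgroup.c hunk further down is the canonical user of this
- API; condensed, the lifecycle looks like this (my_handler and my_event
- are illustrative names):
-
- 	static void my_handler(struct swork_event *sev)
- 	{
- 		/* runs in the swork kthread: fully preemptible context */
- 	}
-
- 	static struct swork_event my_event;
-
- 	/* init, may sleep: */
- 	BUG_ON(swork_get());		/* bring up the worker thread */
- 	INIT_SWORK(&my_event, my_handler);
-
- 	/* later, from a context that must not sleep even on RT: */
- 	swork_queue(&my_event);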
- diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
- index 2873baf5372a..eb1a108f17ca 100644
- --- a/include/linux/thread_info.h
- +++ b/include/linux/thread_info.h
- @@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
- #define test_thread_flag(flag) \
- test_ti_thread_flag(current_thread_info(), flag)
-
- -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
- +#ifdef CONFIG_PREEMPT_LAZY
- +#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
- + test_thread_flag(TIF_NEED_RESCHED_LAZY))
- +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
- +#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)
- +
- +#else
- +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
- +#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
- +#define tif_need_resched_lazy() 0
- +#endif
-
- #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
- static inline int arch_within_stack_frames(const void * const stack,
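-
- Under PREEMPT_LAZY the two flags separate "must reschedule now" from
- "may reschedule lazily"; combining the macros above gives:
-
- 	NEED_RESCHED  NEED_RESCHED_LAZY  tif_need_resched()  tif_need_resched_now()
- 	     0               0                   0                    0
- 	     0               1                   1                    0
- 	     1               0                   1                    1
- 	     1               1                   1                    1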
- diff --git a/include/linux/timer.h b/include/linux/timer.h
- index ec86e4e55ea3..8e5b680d1275 100644
- --- a/include/linux/timer.h
- +++ b/include/linux/timer.h
- @@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
-
- extern int try_to_del_timer_sync(struct timer_list *timer);
-
- -#ifdef CONFIG_SMP
- +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- extern int del_timer_sync(struct timer_list *timer);
- #else
- # define del_timer_sync(t) del_timer(t)
- diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
- index ba57266d9e80..5c36934ec2bc 100644
- --- a/include/linux/trace_events.h
- +++ b/include/linux/trace_events.h
- @@ -56,6 +56,9 @@ struct trace_entry {
- unsigned char flags;
- unsigned char preempt_count;
- int pid;
- + unsigned short migrate_disable;
- + unsigned short padding;
- + unsigned char preempt_lazy_count;
- };
-
- #define TRACE_EVENT_TYPE_MAX \
- diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
- index f30c187ed785..83bf0f798426 100644
- --- a/include/linux/uaccess.h
- +++ b/include/linux/uaccess.h
- @@ -24,6 +24,7 @@ static __always_inline void pagefault_disabled_dec(void)
- */
- static inline void pagefault_disable(void)
- {
- + migrate_disable();
- pagefault_disabled_inc();
- /*
- * make sure to have issued the store before a pagefault
- @@ -40,6 +41,7 @@ static inline void pagefault_enable(void)
- */
- barrier();
- pagefault_disabled_dec();
- + migrate_enable();
- }
-
- /*
- diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
- index 4a29c75b146e..0a294e950df8 100644
- --- a/include/linux/uprobes.h
- +++ b/include/linux/uprobes.h
- @@ -27,6 +27,7 @@
- #include <linux/errno.h>
- #include <linux/rbtree.h>
- #include <linux/types.h>
- +#include <linux/wait.h>
-
- struct vm_area_struct;
- struct mm_struct;
- diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
- index 613771909b6e..e28c5a43229d 100644
- --- a/include/linux/vmstat.h
- +++ b/include/linux/vmstat.h
- @@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
- */
- static inline void __count_vm_event(enum vm_event_item item)
- {
- + preempt_disable_rt();
- raw_cpu_inc(vm_event_states.event[item]);
- + preempt_enable_rt();
- }
-
- static inline void count_vm_event(enum vm_event_item item)
- @@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
-
- static inline void __count_vm_events(enum vm_event_item item, long delta)
- {
- + preempt_disable_rt();
- raw_cpu_add(vm_event_states.event[item], delta);
- + preempt_enable_rt();
- }
-
- static inline void count_vm_events(enum vm_event_item item, long delta)
- diff --git a/include/linux/wait.h b/include/linux/wait.h
- index 2408e8d5c05c..db50d6609195 100644
- --- a/include/linux/wait.h
- +++ b/include/linux/wait.h
- @@ -8,6 +8,7 @@
- #include <linux/spinlock.h>
- #include <asm/current.h>
- #include <uapi/linux/wait.h>
- +#include <linux/atomic.h>
-
- typedef struct __wait_queue wait_queue_t;
- typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
- diff --git a/include/net/dst.h b/include/net/dst.h
- index ddcff17615da..a1fc787b1a8c 100644
- --- a/include/net/dst.h
- +++ b/include/net/dst.h
- @@ -452,7 +452,7 @@ static inline void dst_confirm(struct dst_entry *dst)
- static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
- struct sk_buff *skb)
- {
- - const struct hh_cache *hh;
- + struct hh_cache *hh;
-
- if (dst->pending_confirm) {
- unsigned long now = jiffies;
- diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
- index 231e121cc7d9..d125222b979d 100644
- --- a/include/net/gen_stats.h
- +++ b/include/net/gen_stats.h
- @@ -5,6 +5,7 @@
- #include <linux/socket.h>
- #include <linux/rtnetlink.h>
- #include <linux/pkt_sched.h>
- +#include <net/net_seq_lock.h>
-
- struct gnet_stats_basic_cpu {
- struct gnet_stats_basic_packed bstats;
- @@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
- spinlock_t *lock, struct gnet_dump *d,
- int padattr);
-
- -int gnet_stats_copy_basic(const seqcount_t *running,
- +int gnet_stats_copy_basic(net_seqlock_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
- -void __gnet_stats_copy_basic(const seqcount_t *running,
- +void __gnet_stats_copy_basic(net_seqlock_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b);
- @@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock,
- - seqcount_t *running, struct nlattr *opt);
- + net_seqlock_t *running, struct nlattr *opt);
- void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est);
- int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock,
- - seqcount_t *running, struct nlattr *opt);
- + net_seqlock_t *running, struct nlattr *opt);
- bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est);
- #endif
- diff --git a/include/net/neighbour.h b/include/net/neighbour.h
- index 8b683841e574..bf656008f6e7 100644
- --- a/include/net/neighbour.h
- +++ b/include/net/neighbour.h
- @@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
- }
- #endif
-
- -static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
- +static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
- {
- unsigned int seq;
- int hh_len;
- @@ -501,7 +501,7 @@ struct neighbour_cb {
-
- #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
-
- -static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
- +static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
- const struct net_device *dev)
- {
- unsigned int seq;
- diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
- new file mode 100644
- index 000000000000..a7034298a82a
- --- /dev/null
- +++ b/include/net/net_seq_lock.h
- @@ -0,0 +1,15 @@
- +#ifndef __NET_NET_SEQ_LOCK_H__
- +#define __NET_NET_SEQ_LOCK_H__
- +
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +# define net_seqlock_t seqlock_t
- +# define net_seq_begin(__r) read_seqbegin(__r)
- +# define net_seq_retry(__r, __s) read_seqretry(__r, __s)
- +
- +#else
- +# define net_seqlock_t seqcount_t
- +# define net_seq_begin(__r) read_seqcount_begin(__r)
- +# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s)
- +#endif
- +
- +#endif
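-
- Readers keep the familiar retry loop either way; only the type behind
- the macros changes. A sketch against the Qdisc user below (the bstats
- read is an illustrative payload):
-
- 	unsigned int seq;
- 	u64 bytes;
-
- 	do {
- 		seq = net_seq_begin(&qdisc->running);
- 		bytes = qdisc->bstats.bytes;
- 	} while (net_seq_retry(&qdisc->running, seq));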
- diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
- index 7adf4386ac8f..d3fd5c357268 100644
- --- a/include/net/netns/ipv4.h
- +++ b/include/net/netns/ipv4.h
- @@ -69,6 +69,7 @@ struct netns_ipv4 {
-
- int sysctl_icmp_echo_ignore_all;
- int sysctl_icmp_echo_ignore_broadcasts;
- + int sysctl_icmp_echo_sysrq;
- int sysctl_icmp_ignore_bogus_error_responses;
- int sysctl_icmp_ratelimit;
- int sysctl_icmp_ratemask;
- diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
- index f18fc1a0321f..5d2c9b89c168 100644
- --- a/include/net/sch_generic.h
- +++ b/include/net/sch_generic.h
- @@ -10,6 +10,7 @@
- #include <linux/dynamic_queue_limits.h>
- #include <net/gen_stats.h>
- #include <net/rtnetlink.h>
- +#include <net/net_seq_lock.h>
-
- struct Qdisc_ops;
- struct qdisc_walker;
- @@ -86,7 +87,7 @@ struct Qdisc {
- struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
- struct qdisc_skb_head q;
- struct gnet_stats_basic_packed bstats;
- - seqcount_t running;
- + net_seqlock_t running;
- struct gnet_stats_queue qstats;
- unsigned long state;
- struct Qdisc *next_sched;
- @@ -98,13 +99,22 @@ struct Qdisc {
- spinlock_t busylock ____cacheline_aligned_in_smp;
- };
-
- -static inline bool qdisc_is_running(const struct Qdisc *qdisc)
- +static inline bool qdisc_is_running(struct Qdisc *qdisc)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + return spin_is_locked(&qdisc->running.lock) ? true : false;
- +#else
- return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
- +#endif
- }
-
- static inline bool qdisc_run_begin(struct Qdisc *qdisc)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + if (try_write_seqlock(&qdisc->running))
- + return true;
- + return false;
- +#else
- if (qdisc_is_running(qdisc))
- return false;
- /* Variant of write_seqcount_begin() telling lockdep a trylock
- @@ -113,11 +123,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
- raw_write_seqcount_begin(&qdisc->running);
- seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
- return true;
- +#endif
- }
-
- static inline void qdisc_run_end(struct Qdisc *qdisc)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + write_sequnlock(&qdisc->running);
- +#else
- write_seqcount_end(&qdisc->running);
- +#endif
- }
-
- static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
- @@ -308,7 +323,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
- return qdisc_lock(root);
- }
-
- -static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
- +static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
- {
- struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
- diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h
- new file mode 100644
- index 000000000000..f7710de1b1f3
- --- /dev/null
- +++ b/include/trace/events/hist.h
- @@ -0,0 +1,73 @@
- +#undef TRACE_SYSTEM
- +#define TRACE_SYSTEM hist
- +
- +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
- +#define _TRACE_HIST_H
- +
- +#include "latency_hist.h"
- +#include <linux/tracepoint.h>
- +
- +#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
- +#define trace_preemptirqsoff_hist(a, b)
- +#define trace_preemptirqsoff_hist_rcuidle(a, b)
- +#else
- +TRACE_EVENT(preemptirqsoff_hist,
- +
- + TP_PROTO(int reason, int starthist),
- +
- + TP_ARGS(reason, starthist),
- +
- + TP_STRUCT__entry(
- + __field(int, reason)
- + __field(int, starthist)
- + ),
- +
- + TP_fast_assign(
- + __entry->reason = reason;
- + __entry->starthist = starthist;
- + ),
- +
- + TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
- + __entry->starthist ? "start" : "stop")
- +);
- +#endif
- +
- +#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
- +#define trace_hrtimer_interrupt(a, b, c, d)
- +#else
- +TRACE_EVENT(hrtimer_interrupt,
- +
- + TP_PROTO(int cpu, long long offset, struct task_struct *curr,
- + struct task_struct *task),
- +
- + TP_ARGS(cpu, offset, curr, task),
- +
- + TP_STRUCT__entry(
- + __field(int, cpu)
- + __field(long long, offset)
- + __array(char, ccomm, TASK_COMM_LEN)
- + __field(int, cprio)
- + __array(char, tcomm, TASK_COMM_LEN)
- + __field(int, tprio)
- + ),
- +
- + TP_fast_assign(
- + __entry->cpu = cpu;
- + __entry->offset = offset;
- + memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
- + __entry->cprio = curr->prio;
- + memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
- + task != NULL ? TASK_COMM_LEN : 7);
- + __entry->tprio = task != NULL ? task->prio : -1;
- + ),
- +
- + TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
- + __entry->cpu, __entry->offset, __entry->ccomm,
- + __entry->cprio, __entry->tcomm, __entry->tprio)
- +);
- +#endif
- +
- +#endif /* _TRACE_HIST_H */
- +
- +/* This part must be outside protection */
- +#include <trace/define_trace.h>
- diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h
- new file mode 100644
- index 000000000000..d3f2fbd560b1
- --- /dev/null
- +++ b/include/trace/events/latency_hist.h
- @@ -0,0 +1,29 @@
- +#ifndef _LATENCY_HIST_H
- +#define _LATENCY_HIST_H
- +
- +enum hist_action {
- + IRQS_ON,
- + PREEMPT_ON,
- + TRACE_STOP,
- + IRQS_OFF,
- + PREEMPT_OFF,
- + TRACE_START,
- +};
- +
- +static char *actions[] = {
- + "IRQS_ON",
- + "PREEMPT_ON",
- + "TRACE_STOP",
- + "IRQS_OFF",
- + "PREEMPT_OFF",
- + "TRACE_START",
- +};
- +
- +static inline char *getaction(int action)
- +{
- + if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
- + return actions[action];
- + return "unknown";
- +}
- +
- +#endif /* _LATENCY_HIST_H */
- diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
- index 9b90c57517a9..516ae88cddf4 100644
- --- a/include/trace/events/sched.h
- +++ b/include/trace/events/sched.h
- @@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- - __entry->prio = p->prio;
- + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
- __entry->success = 1; /* rudiment, kill when possible */
- __entry->target_cpu = task_cpu(p);
- ),
- @@ -147,6 +147,7 @@ TRACE_EVENT(sched_switch,
- memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
- __entry->next_pid = next->pid;
- __entry->next_prio = next->prio;
- + /* XXX SCHED_DEADLINE */
- ),
-
- TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
- @@ -181,7 +182,7 @@ TRACE_EVENT(sched_migrate_task,
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- - __entry->prio = p->prio;
- + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
- __entry->orig_cpu = task_cpu(p);
- __entry->dest_cpu = dest_cpu;
- ),
- @@ -206,7 +207,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- - __entry->prio = p->prio;
- + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
- ),
-
- TP_printk("comm=%s pid=%d prio=%d",
- @@ -253,7 +254,7 @@ TRACE_EVENT(sched_process_wait,
- TP_fast_assign(
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
- __entry->pid = pid_nr(pid);
- - __entry->prio = current->prio;
- + __entry->prio = current->prio; /* XXX SCHED_DEADLINE */
- ),
-
- TP_printk("comm=%s pid=%d prio=%d",
- @@ -413,9 +414,9 @@ DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- */
- TRACE_EVENT(sched_pi_setprio,
-
- - TP_PROTO(struct task_struct *tsk, int newprio),
- + TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
-
- - TP_ARGS(tsk, newprio),
- + TP_ARGS(tsk, pi_task),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- @@ -428,7 +429,8 @@ TRACE_EVENT(sched_pi_setprio,
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
- __entry->pid = tsk->pid;
- __entry->oldprio = tsk->prio;
- - __entry->newprio = newprio;
- + __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
- + /* XXX SCHED_DEADLINE bits missing */
- ),
-
- TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
- diff --git a/init/Kconfig b/init/Kconfig
- index 34407f15e6d3..2ce33a32e65d 100644
- --- a/init/Kconfig
- +++ b/init/Kconfig
- @@ -506,7 +506,7 @@ config TINY_RCU
-
- config RCU_EXPERT
- bool "Make expert-level adjustments to RCU configuration"
- - default n
- + default y if PREEMPT_RT_FULL
- help
- This option needs to be enabled if you wish to make
- expert-level adjustments to RCU configuration. By default,
- @@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF
-
- config RCU_FAST_NO_HZ
- bool "Accelerate last non-dyntick-idle CPU's grace periods"
- - depends on NO_HZ_COMMON && SMP && RCU_EXPERT
- + depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
- default n
- help
- This option permits CPUs to enter dynticks-idle state even if
- @@ -650,7 +650,7 @@ config TREE_RCU_TRACE
- config RCU_BOOST
- bool "Enable RCU priority boosting"
- depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
- - default n
- + default y if PREEMPT_RT_FULL
- help
- This option boosts the priority of preempted RCU readers that
- block the current preemptible RCU grace period for too long.
- @@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL
-
- endchoice
-
- -config RCU_EXPEDITE_BOOT
- - bool
- - default n
- - help
- - This option enables expedited grace periods at boot time,
- - as if rcu_expedite_gp() had been invoked early in boot.
- - The corresponding rcu_unexpedite_gp() is invoked from
- - rcu_end_inkernel_boot(), which is intended to be invoked
- - at the end of the kernel-only boot sequence, just before
- - init is exec'ed.
- -
- - Accept the default if unsure.
- -
- endmenu # "RCU Subsystem"
-
- config BUILD_BIN2C
- @@ -1064,6 +1051,7 @@ config CFS_BANDWIDTH
- config RT_GROUP_SCHED
- bool "Group scheduling for SCHED_RR/FIFO"
- depends on CGROUP_SCHED
- + depends on !PREEMPT_RT_FULL
- default n
- help
- This feature lets you explicitly allocate real CPU bandwidth
- @@ -1772,6 +1760,7 @@ choice
-
- config SLAB
- bool "SLAB"
- + depends on !PREEMPT_RT_FULL
- select HAVE_HARDENED_USERCOPY_ALLOCATOR
- help
- The regular slab allocator that is established and known to work
- @@ -1792,6 +1781,7 @@ config SLUB
- config SLOB
- depends on EXPERT
- bool "SLOB (Simple Allocator)"
- + depends on !PREEMPT_RT_FULL
- help
- SLOB replaces the stock allocator with a drastically simpler
- allocator. SLOB is generally more space efficient but
- @@ -1810,7 +1800,7 @@ config SLAB_FREELIST_RANDOM
-
- config SLUB_CPU_PARTIAL
- default y
- - depends on SLUB && SMP
- + depends on SLUB && SMP && !PREEMPT_RT_FULL
- bool "SLUB per cpu partial cache"
- help
- Per cpu partial caches accelerate object allocation and freeing
- diff --git a/init/Makefile b/init/Makefile
- index c4fb45525d08..821190dfaa75 100644
- --- a/init/Makefile
- +++ b/init/Makefile
- @@ -35,4 +35,4 @@ silent_chk_compile.h = :
- include/generated/compile.h: FORCE
- @$($(quiet)chk_compile.h)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
- + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
- diff --git a/init/main.c b/init/main.c
- index 99f026565608..48ffaaad8ac9 100644
- --- a/init/main.c
- +++ b/init/main.c
- @@ -508,6 +508,7 @@ asmlinkage __visible void __init start_kernel(void)
- setup_command_line(command_line);
- setup_nr_cpu_ids();
- setup_per_cpu_areas();
- + softirq_early_init();
- boot_cpu_state_init();
- smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
-
- diff --git a/ipc/sem.c b/ipc/sem.c
- index 10b94bc59d4a..b8360eaacc7a 100644
- --- a/ipc/sem.c
- +++ b/ipc/sem.c
- @@ -712,6 +712,13 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
- static void wake_up_sem_queue_prepare(struct list_head *pt,
- struct sem_queue *q, int error)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + struct task_struct *p = q->sleeper;
- + get_task_struct(p);
- + q->status = error;
- + wake_up_process(p);
- + put_task_struct(p);
- +#else
- if (list_empty(pt)) {
- /*
- * Hold preempt off so that we don't get preempted and have the
- @@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
- q->pid = error;
-
- list_add_tail(&q->list, pt);
- +#endif
- }
-
- /**
- @@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
- */
- static void wake_up_sem_queue_do(struct list_head *pt)
- {
- +#ifndef CONFIG_PREEMPT_RT_BASE
- struct sem_queue *q, *t;
- int did_something;
-
- @@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
- }
- if (did_something)
- preempt_enable();
- +#endif
- }
-
- static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
- diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
- index ebdb0043203a..b9e6aa7e5aa6 100644
- --- a/kernel/Kconfig.locks
- +++ b/kernel/Kconfig.locks
- @@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW
-
- config MUTEX_SPIN_ON_OWNER
- def_bool y
- - depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
- + depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config RWSEM_SPIN_ON_OWNER
- def_bool y
- - depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
- + depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
-
- config LOCK_SPIN_ON_OWNER
- def_bool y
- diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
- index 3f9c97419f02..11dbe26a8279 100644
- --- a/kernel/Kconfig.preempt
- +++ b/kernel/Kconfig.preempt
- @@ -1,3 +1,16 @@
- +config PREEMPT
- + bool
- + select PREEMPT_COUNT
- +
- +config PREEMPT_RT_BASE
- + bool
- + select PREEMPT
- +
- +config HAVE_PREEMPT_LAZY
- + bool
- +
- +config PREEMPT_LAZY
- + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
-
- choice
- prompt "Preemption Model"
- @@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY
-
- Select this if you are building a kernel for a desktop system.
-
- -config PREEMPT
- +config PREEMPT__LL
- bool "Preemptible Kernel (Low-Latency Desktop)"
- - select PREEMPT_COUNT
- + select PREEMPT
- select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
- help
- This option reduces the latency of the kernel by making
- @@ -52,6 +65,22 @@ config PREEMPT
- embedded system with latency requirements in the milliseconds
- range.
-
- +config PREEMPT_RTB
- + bool "Preemptible Kernel (Basic RT)"
- + select PREEMPT_RT_BASE
- + help
- + This option is basically the same as (Low-Latency Desktop) but
- + enables changes which are preliminary for the full preemptible
- + RT kernel.
- +
- +config PREEMPT_RT_FULL
- + bool "Fully Preemptible Kernel (RT)"
- + depends on IRQ_FORCED_THREADING
- + select PREEMPT_RT_BASE
- + select PREEMPT_RCU
- + help
- + All and everything: the complete PREEMPT_RT feature set,
- + including sleeping spinlocks and rwlocks.
- +
- endchoice
-
- config PREEMPT_COUNT
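-
- Selecting the new choice entry cascades through the hidden symbols; a
- sketch of the resulting .config fragment:
-
- 	CONFIG_PREEMPT_RT_FULL=y
- 	CONFIG_PREEMPT_RT_BASE=y	# selected by PREEMPT_RT_FULL
- 	CONFIG_PREEMPT=y		# selected by PREEMPT_RT_BASE
- 	CONFIG_PREEMPT_COUNT=y		# selected by PREEMPT
- 	CONFIG_PREEMPT_RCU=y		# selected by PREEMPT_RT_FULL
- 	CONFIG_PREEMPT_LAZY=y		# only if the arch sets HAVE_PREEMPT_LAZY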
- diff --git a/kernel/cgroup.c b/kernel/cgroup.c
- index 4c233437ee1a..6c3c9f298f22 100644
- --- a/kernel/cgroup.c
- +++ b/kernel/cgroup.c
- @@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
- queue_work(cgroup_destroy_wq, &css->destroy_work);
- }
-
- -static void css_release_work_fn(struct work_struct *work)
- +static void css_release_work_fn(struct swork_event *sev)
- {
- struct cgroup_subsys_state *css =
- - container_of(work, struct cgroup_subsys_state, destroy_work);
- + container_of(sev, struct cgroup_subsys_state, destroy_swork);
- struct cgroup_subsys *ss = css->ss;
- struct cgroup *cgrp = css->cgroup;
-
- @@ -5087,8 +5087,8 @@ static void css_release(struct percpu_ref *ref)
- struct cgroup_subsys_state *css =
- container_of(ref, struct cgroup_subsys_state, refcnt);
-
- - INIT_WORK(&css->destroy_work, css_release_work_fn);
- - queue_work(cgroup_destroy_wq, &css->destroy_work);
- + INIT_SWORK(&css->destroy_swork, css_release_work_fn);
- + swork_queue(&css->destroy_swork);
- }
-
- static void init_and_link_css(struct cgroup_subsys_state *css,
- @@ -5749,6 +5749,7 @@ static int __init cgroup_wq_init(void)
- */
- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
- BUG_ON(!cgroup_destroy_wq);
- + BUG_ON(swork_get());
-
- /*
- * Used to destroy pidlists and separate to serve as flush domain.
- diff --git a/kernel/cpu.c b/kernel/cpu.c
- index 802eb3361a0a..c6a4cf8ba645 100644
- --- a/kernel/cpu.c
- +++ b/kernel/cpu.c
- @@ -239,6 +239,289 @@ static struct {
- #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
- #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
-
- +/**
- + * hotplug_pcp - per cpu hotplug descriptor
- + * @unplug: set when pin_current_cpu() needs to sync tasks
- + * @sync_tsk: the task that waits for tasks to finish pinned sections
- + * @refcount: counter of tasks in pinned sections
- + * @grab_lock: set when the tasks entering pinned sections should wait
- + * @synced: notifier for @sync_tsk to tell cpu_down it's finished
- + * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
- + * @mutex_init: zero if the mutex hasn't been initialized yet.
- + *
- + * Although @unplug and @sync_tsk may point to the same task, @unplug
- + * is used as a flag and remains set after @sync_tsk has exited and
- + * been set to NULL.
- + */
- +struct hotplug_pcp {
- + struct task_struct *unplug;
- + struct task_struct *sync_tsk;
- + int refcount;
- + int grab_lock;
- + struct completion synced;
- + struct completion unplug_wait;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + /*
- + * Note, on PREEMPT_RT, the hotplug lock must save the state of
- + * the task, otherwise the mutex will cause the task to fail
- + * to sleep when required. (Because it's called from migrate_disable())
- + *
- + * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
- + * state.
- + */
- + spinlock_t lock;
- +#else
- + struct mutex mutex;
- +#endif
- + int mutex_init;
- +};
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
- +# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
- +#else
- +# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
- +# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
- +#endif
- +
- +static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
- +
- +/**
- + * pin_current_cpu - Prevent the current cpu from being unplugged
- + *
- + * Lightweight version of get_online_cpus() to prevent cpu from being
- + * unplugged when code runs in a migration disabled region.
- + *
- + * Must be called with preemption disabled (preempt_count = 1)!
- + */
- +void pin_current_cpu(void)
- +{
- + struct hotplug_pcp *hp;
- + int force = 0;
- +
- +retry:
- + hp = this_cpu_ptr(&hotplug_pcp);
- +
- + if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
- + hp->unplug == current) {
- + hp->refcount++;
- + return;
- + }
- + if (hp->grab_lock) {
- + preempt_enable();
- + hotplug_lock(hp);
- + hotplug_unlock(hp);
- + } else {
- + preempt_enable();
- + /*
- + * Try to push this task off of this CPU.
- + */
- + if (!migrate_me()) {
- + preempt_disable();
- + hp = this_cpu_ptr(&hotplug_pcp);
- + if (!hp->grab_lock) {
- + /*
- + * Just let it continue; it's already pinned
- + * or about to sleep.
- + */
- + force = 1;
- + goto retry;
- + }
- + preempt_enable();
- + }
- + }
- + preempt_disable();
- + goto retry;
- +}
- +
- +/**
- + * unpin_current_cpu - Allow unplug of current cpu
- + *
- + * Must be called with preemption or interrupts disabled!
- + */
- +void unpin_current_cpu(void)
- +{
- + struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
- +
- + WARN_ON(hp->refcount <= 0);
- +
- + /* This is safe. sync_unplug_thread is pinned to this cpu */
- + if (!--hp->refcount && hp->unplug && hp->unplug != current)
- + wake_up_process(hp->unplug);
- +}
- +
- +static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
- +{
- + set_current_state(TASK_UNINTERRUPTIBLE);
- + while (hp->refcount) {
- + schedule_preempt_disabled();
- + set_current_state(TASK_UNINTERRUPTIBLE);
- + }
- +}
- +
- +static int sync_unplug_thread(void *data)
- +{
- + struct hotplug_pcp *hp = data;
- +
- + wait_for_completion(&hp->unplug_wait);
- + preempt_disable();
- + hp->unplug = current;
- + wait_for_pinned_cpus(hp);
- +
- + /*
- + * This thread will synchronize the cpu_down() with threads
- + * that have pinned the CPU. When the pinned CPU count reaches
- + * zero, we inform the cpu_down code to continue to the next step.
- + */
- + set_current_state(TASK_UNINTERRUPTIBLE);
- + preempt_enable();
- + complete(&hp->synced);
- +
- + /*
- + * If all succeeds, the next step will need tasks to wait till
- + * the CPU is offline before continuing. To do this, the grab_lock
- + * is set and tasks going into pin_current_cpu() will block on the
- + * mutex. But we still need to wait for those that are already in
- + * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
- + * will kick this thread out.
- + */
- + while (!hp->grab_lock && !kthread_should_stop()) {
- + schedule();
- + set_current_state(TASK_UNINTERRUPTIBLE);
- + }
- +
- + /* Make sure grab_lock is seen before we see a stale completion */
- + smp_mb();
- +
- + /*
- + * Now just before cpu_down() enters stop machine, we need to make
- + * sure all tasks that are in pinned CPU sections are out, and new
- + * tasks will now grab the lock, keeping them from entering pinned
- + * CPU sections.
- + */
- + if (!kthread_should_stop()) {
- + preempt_disable();
- + wait_for_pinned_cpus(hp);
- + preempt_enable();
- + complete(&hp->synced);
- + }
- +
- + set_current_state(TASK_UNINTERRUPTIBLE);
- + while (!kthread_should_stop()) {
- + schedule();
- + set_current_state(TASK_UNINTERRUPTIBLE);
- + }
- + set_current_state(TASK_RUNNING);
- +
- + /*
- + * Force this thread off this CPU as it's going down and
- + * we don't want any more work on this CPU.
- + */
- + current->flags &= ~PF_NO_SETAFFINITY;
- + set_cpus_allowed_ptr(current, cpu_present_mask);
- + migrate_me();
- + return 0;
- +}
- +
- +static void __cpu_unplug_sync(struct hotplug_pcp *hp)
- +{
- + wake_up_process(hp->sync_tsk);
- + wait_for_completion(&hp->synced);
- +}
- +
- +static void __cpu_unplug_wait(unsigned int cpu)
- +{
- + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- +
- + complete(&hp->unplug_wait);
- + wait_for_completion(&hp->synced);
- +}
- +
- +/*
- + * Start the sync_unplug_thread on the target cpu and wait for it to
- + * complete.
- + */
- +static int cpu_unplug_begin(unsigned int cpu)
- +{
- + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- + int err;
- +
- + /* Protected by cpu_hotplug.lock */
- + if (!hp->mutex_init) {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + spin_lock_init(&hp->lock);
- +#else
- + mutex_init(&hp->mutex);
- +#endif
- + hp->mutex_init = 1;
- + }
- +
- + /* Inform the scheduler to migrate tasks off this CPU */
- + tell_sched_cpu_down_begin(cpu);
- +
- + init_completion(&hp->synced);
- + init_completion(&hp->unplug_wait);
- +
- + hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- + if (IS_ERR(hp->sync_tsk)) {
- + err = PTR_ERR(hp->sync_tsk);
- + hp->sync_tsk = NULL;
- + return err;
- + }
- + kthread_bind(hp->sync_tsk, cpu);
- +
- + /*
- + * Wait for tasks to get out of the pinned sections,
- + * it's still OK if new tasks enter. Some CPU notifiers will
- + * wait for tasks that are going to enter these sections and
- + * we must not have them block.
- + */
- + wake_up_process(hp->sync_tsk);
- + return 0;
- +}
- +
- +static void cpu_unplug_sync(unsigned int cpu)
- +{
- + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- +
- + init_completion(&hp->synced);
- + /* The completion needs to be initialized before setting grab_lock */
- + smp_wmb();
- +
- + /* Grab the mutex before setting grab_lock */
- + hotplug_lock(hp);
- + hp->grab_lock = 1;
- +
- + /*
- + * The CPU notifiers have been completed.
- + * Wait for tasks to get out of pinned CPU sections and have new
- + * tasks block until the CPU is completely down.
- + */
- + __cpu_unplug_sync(hp);
- +
- + /* All done with the sync thread */
- + kthread_stop(hp->sync_tsk);
- + hp->sync_tsk = NULL;
- +}
- +
- +static void cpu_unplug_done(unsigned int cpu)
- +{
- + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- +
- + hp->unplug = NULL;
- + /* Let all tasks know cpu unplug is finished before cleaning up */
- + smp_wmb();
- +
- + if (hp->sync_tsk)
- + kthread_stop(hp->sync_tsk);
- +
- + if (hp->grab_lock) {
- + hotplug_unlock(hp);
- + /* protected by cpu_hotplug.lock */
- + hp->grab_lock = 0;
- + }
- + tell_sched_cpu_down_done(cpu);
- +}
-
- void get_online_cpus(void)
- {
- @@ -802,10 +1085,14 @@ static int takedown_cpu(unsigned int cpu)
- struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
- int err;
-
- + __cpu_unplug_wait(cpu);
- /* Park the smpboot threads */
- kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
- smpboot_park_threads(cpu);
-
- + /* Notifiers are done. Don't let any more tasks pin this CPU. */
- + cpu_unplug_sync(cpu);
- +
- /*
- * Prevent irq alloc/free while the dying cpu reorganizes the
- * interrupt affinities.
- @@ -890,6 +1177,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
- struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
- int prev_state, ret = 0;
- bool hasdied = false;
- + int mycpu;
- + cpumask_var_t cpumask;
- + cpumask_var_t cpumask_org;
-
- if (num_online_cpus() == 1)
- return -EBUSY;
- @@ -897,7 +1187,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
- if (!cpu_present(cpu))
- return -EINVAL;
-
- + /* Move the downtaker off the unplug cpu */
- + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
- + return -ENOMEM;
- + if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
- + free_cpumask_var(cpumask);
- + return -ENOMEM;
- + }
- +
- + cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
- + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
- + set_cpus_allowed_ptr(current, cpumask);
- + free_cpumask_var(cpumask);
- + migrate_disable();
- + mycpu = smp_processor_id();
- + if (mycpu == cpu) {
- + printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
- + migrate_enable();
- + ret = -EBUSY;
- + goto restore_cpus;
- + }
- +
- + migrate_enable();
- cpu_hotplug_begin();
- + ret = cpu_unplug_begin(cpu);
- + if (ret) {
- + printk("cpu_unplug_begin(%d) failed\n", cpu);
- + goto out_cancel;
- + }
-
- cpuhp_tasks_frozen = tasks_frozen;
-
- @@ -936,10 +1253,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
-
- hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
- out:
- + cpu_unplug_done(cpu);
- +out_cancel:
- cpu_hotplug_done();
- /* This post dead nonsense must die */
- if (!ret && hasdied)
- cpu_notify_nofail(CPU_POST_DEAD, cpu);
- +restore_cpus:
- + set_cpus_allowed_ptr(current, cpumask_org);
- + free_cpumask_var(cpumask_org);
- return ret;
- }
-
- @@ -1242,6 +1564,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
-
- #endif /* CONFIG_PM_SLEEP_SMP */
-
- +int __boot_cpu_id;
- +
- #endif /* CONFIG_SMP */
-
- /* Boot processor state steps */
- @@ -1926,6 +2250,10 @@ void __init boot_cpu_init(void)
- set_cpu_active(cpu, true);
- set_cpu_present(cpu, true);
- set_cpu_possible(cpu, true);
- +
- +#ifdef CONFIG_SMP
- + __boot_cpu_id = cpu;
- +#endif
- }
-
- /*
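-
- pin_current_cpu()/unpin_current_cpu() are the hooks that
- migrate_disable()/migrate_enable() use elsewhere in this series.
- Roughly, as a sketch rather than the literal scheduler code
- (current->migrate_disable is a task field this series adds):
-
- 	/* migrate_disable(), simplified: */
- 	preempt_disable();
- 	pin_current_cpu();		/* blocks a concurrent cpu_down() */
- 	current->migrate_disable++;
- 	preempt_enable();		/* preemptible again, but CPU-pinned */
-
- 	/* migrate_enable(), simplified: */
- 	preempt_disable();
- 	current->migrate_disable--;
- 	unpin_current_cpu();		/* may wake the sync_unplug thread */
- 	preempt_enable();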
- diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
- index 009cc9a17d95..67b02e138a47 100644
- --- a/kernel/cpu_pm.c
- +++ b/kernel/cpu_pm.c
- @@ -22,15 +22,21 @@
- #include <linux/spinlock.h>
- #include <linux/syscore_ops.h>
-
- -static DEFINE_RWLOCK(cpu_pm_notifier_lock);
- -static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
- +static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
-
- static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
- {
- int ret;
-
- - ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
- + /*
- + * __atomic_notifier_call_chain has a RCU read critical section, which
- + * could be dysfunctional in cpu idle. Copy RCU_NONIDLE code to let
- + * RCU know this.
- + */
- + rcu_irq_enter_irqson();
- + ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
- nr_to_call, nr_calls);
- + rcu_irq_exit_irqson();
-
- return notifier_to_errno(ret);
- }
- @@ -47,14 +53,7 @@ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
- */
- int cpu_pm_register_notifier(struct notifier_block *nb)
- {
- - unsigned long flags;
- - int ret;
- -
- - write_lock_irqsave(&cpu_pm_notifier_lock, flags);
- - ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
- - write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
- -
- - return ret;
- + return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
- }
- EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
-
- @@ -69,14 +68,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
- */
- int cpu_pm_unregister_notifier(struct notifier_block *nb)
- {
- - unsigned long flags;
- - int ret;
- -
- - write_lock_irqsave(&cpu_pm_notifier_lock, flags);
- - ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
- - write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
- -
- - return ret;
- + return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
- }
- EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
-
- @@ -100,7 +92,6 @@ int cpu_pm_enter(void)
- int nr_calls;
- int ret = 0;
-
- - read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
- if (ret)
- /*
- @@ -108,7 +99,6 @@ int cpu_pm_enter(void)
- * PM entry who are notified earlier to prepare for it.
- */
- cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
- - read_unlock(&cpu_pm_notifier_lock);
-
- return ret;
- }
- @@ -128,13 +118,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
- */
- int cpu_pm_exit(void)
- {
- - int ret;
- -
- - read_lock(&cpu_pm_notifier_lock);
- - ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
- - read_unlock(&cpu_pm_notifier_lock);
- -
- - return ret;
- + return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
- }
- EXPORT_SYMBOL_GPL(cpu_pm_exit);
-
- @@ -159,7 +143,6 @@ int cpu_cluster_pm_enter(void)
- int nr_calls;
- int ret = 0;
-
- - read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
- if (ret)
- /*
- @@ -167,7 +150,6 @@ int cpu_cluster_pm_enter(void)
- * PM entry who are notified earlier to prepare for it.
- */
- cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
- - read_unlock(&cpu_pm_notifier_lock);
-
- return ret;
- }
- @@ -190,13 +172,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
- */
- int cpu_cluster_pm_exit(void)
- {
- - int ret;
- -
- - read_lock(&cpu_pm_notifier_lock);
- - ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
- - read_unlock(&cpu_pm_notifier_lock);
- -
- - return ret;
- + return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
- }
- EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
-
- diff --git a/kernel/cpuset.c b/kernel/cpuset.c
- index 511b1dd8ff09..1dd63833ecdc 100644
- --- a/kernel/cpuset.c
- +++ b/kernel/cpuset.c
- @@ -285,7 +285,7 @@ static struct cpuset top_cpuset = {
- */
-
- static DEFINE_MUTEX(cpuset_mutex);
- -static DEFINE_SPINLOCK(callback_lock);
- +static DEFINE_RAW_SPINLOCK(callback_lock);
-
- static struct workqueue_struct *cpuset_migrate_mm_wq;
-
- @@ -908,9 +908,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
- continue;
- rcu_read_unlock();
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cpumask_copy(cp->effective_cpus, new_cpus);
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
- !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
- @@ -975,9 +975,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
- if (retval < 0)
- return retval;
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- /* use trialcs->cpus_allowed as a temp variable */
- update_cpumasks_hier(cs, trialcs->cpus_allowed);
- @@ -1177,9 +1177,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
- continue;
- rcu_read_unlock();
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cp->effective_mems = *new_mems;
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
- !nodes_equal(cp->mems_allowed, cp->effective_mems));
- @@ -1247,9 +1247,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
- if (retval < 0)
- goto done;
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cs->mems_allowed = trialcs->mems_allowed;
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- /* use trialcs->mems_allowed as a temp variable */
- update_nodemasks_hier(cs, &trialcs->mems_allowed);
- @@ -1340,9 +1340,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
- spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
- || (is_spread_page(cs) != is_spread_page(trialcs)));
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cs->flags = trialcs->flags;
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
- rebuild_sched_domains_locked();
- @@ -1757,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
- cpuset_filetype_t type = seq_cft(sf)->private;
- int ret = 0;
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
-
- switch (type) {
- case FILE_CPULIST:
- @@ -1776,7 +1776,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
- ret = -EINVAL;
- }
-
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
- return ret;
- }
-
- @@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
-
- cpuset_inc();
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
- cpumask_copy(cs->effective_cpus, parent->effective_cpus);
- cs->effective_mems = parent->effective_mems;
- }
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
- goto out_unlock;
- @@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
- }
- rcu_read_unlock();
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cs->mems_allowed = parent->mems_allowed;
- cs->effective_mems = parent->mems_allowed;
- cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
- cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
- out_unlock:
- mutex_unlock(&cpuset_mutex);
- return 0;
- @@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
- static void cpuset_bind(struct cgroup_subsys_state *root_css)
- {
- mutex_lock(&cpuset_mutex);
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
-
- if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
- cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
- @@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
- top_cpuset.mems_allowed = top_cpuset.effective_mems;
- }
-
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
- mutex_unlock(&cpuset_mutex);
- }
-
- @@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
- {
- bool is_empty;
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cpumask_copy(cs->cpus_allowed, new_cpus);
- cpumask_copy(cs->effective_cpus, new_cpus);
- cs->mems_allowed = *new_mems;
- cs->effective_mems = *new_mems;
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- /*
- * Don't call update_tasks_cpumask() if the cpuset becomes empty,
- @@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs,
- if (nodes_empty(*new_mems))
- *new_mems = parent_cs(cs)->effective_mems;
-
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- cpumask_copy(cs->effective_cpus, new_cpus);
- cs->effective_mems = *new_mems;
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
-
- if (cpus_updated)
- update_tasks_cpumask(cs);
- @@ -2317,21 +2317,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
-
- /* synchronize cpus_allowed to cpu_active_mask */
- if (cpus_updated) {
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- if (!on_dfl)
- cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
- cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
- /* we don't mess with cpumasks of tasks in top_cpuset */
- }
-
- /* synchronize mems_allowed to N_MEMORY */
- if (mems_updated) {
- - spin_lock_irq(&callback_lock);
- + raw_spin_lock_irq(&callback_lock);
- if (!on_dfl)
- top_cpuset.mems_allowed = new_mems;
- top_cpuset.effective_mems = new_mems;
- - spin_unlock_irq(&callback_lock);
- + raw_spin_unlock_irq(&callback_lock);
- update_tasks_nodemask(&top_cpuset);
- }
-
- @@ -2436,11 +2436,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
- {
- unsigned long flags;
-
- - spin_lock_irqsave(&callback_lock, flags);
- + raw_spin_lock_irqsave(&callback_lock, flags);
- rcu_read_lock();
- guarantee_online_cpus(task_cs(tsk), pmask);
- rcu_read_unlock();
- - spin_unlock_irqrestore(&callback_lock, flags);
- + raw_spin_unlock_irqrestore(&callback_lock, flags);
- }
-
- void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
- @@ -2488,11 +2488,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
- nodemask_t mask;
- unsigned long flags;
-
- - spin_lock_irqsave(&callback_lock, flags);
- + raw_spin_lock_irqsave(&callback_lock, flags);
- rcu_read_lock();
- guarantee_online_mems(task_cs(tsk), &mask);
- rcu_read_unlock();
- - spin_unlock_irqrestore(&callback_lock, flags);
- + raw_spin_unlock_irqrestore(&callback_lock, flags);
-
- return mask;
- }
- @@ -2584,14 +2584,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
- return true;
-
- /* Not hardwall and node outside mems_allowed: scan up cpusets */
- - spin_lock_irqsave(&callback_lock, flags);
- + raw_spin_lock_irqsave(&callback_lock, flags);
-
- rcu_read_lock();
- cs = nearest_hardwall_ancestor(task_cs(current));
- allowed = node_isset(node, cs->mems_allowed);
- rcu_read_unlock();
-
- - spin_unlock_irqrestore(&callback_lock, flags);
- + raw_spin_unlock_irqrestore(&callback_lock, flags);
- return allowed;
- }
-
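-
- The pattern in the cpuset hunks above is the standard RT conversion: on PREEMPT_RT a plain spinlock_t is substituted by a sleeping rt_mutex, so a lock taken from contexts that must not sleep (callback_lock is reached from scheduler paths such as cpuset_cpus_allowed() with raw locks already held) has to become a raw_spinlock_t, which keeps true spinning semantics. A minimal sketch of the idiom; example_lock and example_state are illustrative names, not from the patch:
-
- 	#include <linux/spinlock.h>
-
- 	static DEFINE_RAW_SPINLOCK(example_lock);
- 	static int example_state;
-
- 	static void example_update(int v)
- 	{
- 		unsigned long flags;
-
- 		/* Spins even on PREEMPT_RT, so keep the section short. */
- 		raw_spin_lock_irqsave(&example_lock, flags);
- 		example_state = v;
- 		raw_spin_unlock_irqrestore(&example_lock, flags);
- 	}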
- diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
- index 77777d918676..3203e9dee9f8 100644
- --- a/kernel/debug/kdb/kdb_io.c
- +++ b/kernel/debug/kdb/kdb_io.c
- @@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
- int linecount;
- int colcount;
- int logging, saved_loglevel = 0;
- - int saved_trap_printk;
- int got_printf_lock = 0;
- int retlen = 0;
- int fnd, len;
- @@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
- unsigned long uninitialized_var(flags);
-
- preempt_disable();
- - saved_trap_printk = kdb_trap_printk;
- - kdb_trap_printk = 0;
-
- /* Serialize kdb_printf if multiple cpus try to write at once.
- * But if any cpu goes recursive in kdb, just print the output,
- @@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
- } else {
- __release(kdb_printf_lock);
- }
- - kdb_trap_printk = saved_trap_printk;
- preempt_enable();
- return retlen;
- }
- @@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
- va_list ap;
- int r;
-
- + kdb_trap_printk++;
- va_start(ap, fmt);
- r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
- va_end(ap);
- + kdb_trap_printk--;
-
- return r;
- }
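-
- The kdb change above replaces a save/restore of kdb_trap_printk inside vkdb_printf() with a nesting counter bumped by the kdb_printf() wrapper, so recursive invocations nest instead of clobbering a saved copy. Roughly, as a userspace analogue (trap_active and traced_printf() are illustrative stand-ins, not kernel symbols):
-
- 	#include <stdarg.h>
- 	#include <stdio.h>
-
- 	static int trap_active;	/* analogue of kdb_trap_printk */
-
- 	int traced_printf(const char *fmt, ...)
- 	{
- 		va_list ap;
- 		int r;
-
- 		trap_active++;		/* nests safely under recursion */
- 		va_start(ap, fmt);
- 		r = vprintf(fmt, ap);	/* stand-in for vkdb_printf() */
- 		va_end(ap);
- 		trap_active--;
-
- 		return r;
- 	}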
- diff --git a/kernel/events/core.c b/kernel/events/core.c
- index 13b9784427b0..f74fbfe5465c 100644
- --- a/kernel/events/core.c
- +++ b/kernel/events/core.c
- @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
- raw_spin_lock_init(&cpuctx->hrtimer_lock);
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- timer->function = perf_mux_hrtimer_handler;
- + timer->irqsafe = 1;
- }
-
- static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
- @@ -8405,6 +8406,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
-
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
- + hwc->hrtimer.irqsafe = 1;
-
- /*
- * Since hrtimers have a fixed rate, we can do a static freq->period
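-
- The irqsafe flag set in the two perf hunks above is a field that exists only in RT-patched trees: on PREEMPT_RT most hrtimer callbacks are deferred to softirq context, and irqsafe = 1 opts a callback back into expiry from hard irq context, which perf relies on here. A hedged sketch of marking a timer this way; example_timer and example_fn are hypothetical:
-
- 	#include <linux/hrtimer.h>
- 	#include <linux/ktime.h>
-
- 	static struct hrtimer example_timer;
-
- 	static enum hrtimer_restart example_fn(struct hrtimer *t)
- 	{
- 		/* With irqsafe set, this runs in hard irq context on RT too. */
- 		return HRTIMER_NORESTART;
- 	}
-
- 	static void example_arm(void)
- 	{
- 		hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- 		example_timer.function = example_fn;
- 		example_timer.irqsafe = 1;	/* RT-only field */
- 		hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
- 	}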
- diff --git a/kernel/exit.c b/kernel/exit.c
- index 3076f3089919..fb2ebcf3ca7c 100644
- --- a/kernel/exit.c
- +++ b/kernel/exit.c
- @@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
- * Do this under ->siglock, we can race with another thread
- * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
- */
- - flush_sigqueue(&tsk->pending);
- + flush_task_sigqueue(tsk);
- tsk->sighand = NULL;
- spin_unlock(&sighand->siglock);
-
- diff --git a/kernel/fork.c b/kernel/fork.c
- index 70e10cb49be0..2529725eefa2 100644
- --- a/kernel/fork.c
- +++ b/kernel/fork.c
- @@ -77,6 +77,7 @@
- #include <linux/compiler.h>
- #include <linux/sysctl.h>
- #include <linux/kcov.h>
- +#include <linux/kprobes.h>
-
- #include <asm/pgtable.h>
- #include <asm/pgalloc.h>
- @@ -378,13 +379,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
- if (atomic_dec_and_test(&sig->sigcnt))
- free_signal_struct(sig);
- }
- -
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +static
- +#endif
- void __put_task_struct(struct task_struct *tsk)
- {
- WARN_ON(!tsk->exit_state);
- WARN_ON(atomic_read(&tsk->usage));
- WARN_ON(tsk == current);
-
- + /*
- + * Remove function-return probe instances associated with this
- + * task and put them back on the free list.
- + */
- + kprobe_flush_task(tsk);
- +
- + /* Task is done with its stack. */
- + put_task_stack(tsk);
- +
- cgroup_free(tsk);
- task_numa_free(tsk);
- security_task_free(tsk);
- @@ -395,7 +407,18 @@ void __put_task_struct(struct task_struct *tsk)
- if (!profile_handoff_task(tsk))
- free_task(tsk);
- }
- +#ifndef CONFIG_PREEMPT_RT_BASE
- EXPORT_SYMBOL_GPL(__put_task_struct);
- +#else
- +void __put_task_struct_cb(struct rcu_head *rhp)
- +{
- + struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
- +
- + __put_task_struct(tsk);
- +
- +}
- +EXPORT_SYMBOL_GPL(__put_task_struct_cb);
- +#endif
-
- void __init __weak arch_task_cache_init(void) { }
-
- @@ -541,6 +564,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
- tsk->splice_pipe = NULL;
- tsk->task_frag.page = NULL;
- tsk->wake_q.next = NULL;
- + tsk->wake_q_sleeper.next = NULL;
-
- account_kernel_stack(tsk, 1);
-
- @@ -867,6 +891,19 @@ void __mmdrop(struct mm_struct *mm)
- }
- EXPORT_SYMBOL_GPL(__mmdrop);
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +/*
- + * RCU callback for delayed mm drop. Not strictly rcu, but we don't
- + * want another facility to make this work.
- + */
- +void __mmdrop_delayed(struct rcu_head *rhp)
- +{
- + struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
- +
- + __mmdrop(mm);
- +}
- +#endif
- +
- static inline void __mmput(struct mm_struct *mm)
- {
- VM_BUG_ON(atomic_read(&mm->mm_users));
- @@ -1432,6 +1469,7 @@ static void rt_mutex_init_task(struct task_struct *p)
- #ifdef CONFIG_RT_MUTEXES
- p->pi_waiters = RB_ROOT;
- p->pi_waiters_leftmost = NULL;
- + p->pi_top_task = NULL;
- p->pi_blocked_on = NULL;
- #endif
- }
- @@ -1441,6 +1479,9 @@ static void rt_mutex_init_task(struct task_struct *p)
- */
- static void posix_cpu_timers_init(struct task_struct *tsk)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + tsk->posix_timer_list = NULL;
- +#endif
- tsk->cputime_expires.prof_exp = 0;
- tsk->cputime_expires.virt_exp = 0;
- tsk->cputime_expires.sched_exp = 0;
- @@ -1567,6 +1608,7 @@ static __latent_entropy struct task_struct *copy_process(
- spin_lock_init(&p->alloc_lock);
-
- init_sigpending(&p->pending);
- + p->sigqueue_cache = NULL;
-
- p->utime = p->stime = p->gtime = 0;
- p->utimescaled = p->stimescaled = 0;
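-
- With __put_task_struct() made static under CONFIG_PREEMPT_RT_BASE, the final reference drop has to go through the exported RCU callback so the potentially expensive teardown leaves atomic context. The matching header side is not part of this hunk; presumably it looks like the sketch below, where put_rcu is the rcu_head this series adds to task_struct:
-
- 	/* Header-side sketch (assumed, not shown in the hunk above). */
- 	static inline void put_task_struct(struct task_struct *t)
- 	{
- 		if (atomic_dec_and_test(&t->usage))
- 			call_rcu(&t->put_rcu, __put_task_struct_cb);
- 	}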
- diff --git a/kernel/futex.c b/kernel/futex.c
- index 88bad86180ac..2e074d63e8fa 100644
- --- a/kernel/futex.c
- +++ b/kernel/futex.c
- @@ -801,7 +801,7 @@ static int refill_pi_state_cache(void)
- return 0;
- }
-
- -static struct futex_pi_state * alloc_pi_state(void)
- +static struct futex_pi_state *alloc_pi_state(void)
- {
- struct futex_pi_state *pi_state = current->pi_state_cache;
-
- @@ -811,6 +811,11 @@ static struct futex_pi_state * alloc_pi_state(void)
- return pi_state;
- }
-
- +static void get_pi_state(struct futex_pi_state *pi_state)
- +{
- + WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
- +}
- +
- /*
- * Drops a reference to the pi_state object and frees or caches it
- * when the last reference is gone.
- @@ -855,7 +860,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
- * Look up the task based on what TID userspace gave us.
- * We dont trust it.
- */
- -static struct task_struct * futex_find_get_task(pid_t pid)
- +static struct task_struct *futex_find_get_task(pid_t pid)
- {
- struct task_struct *p;
-
- @@ -905,7 +910,9 @@ void exit_pi_state_list(struct task_struct *curr)
- * task still owns the PI-state:
- */
- if (head->next != next) {
- + raw_spin_unlock_irq(&curr->pi_lock);
- spin_unlock(&hb->lock);
- + raw_spin_lock_irq(&curr->pi_lock);
- continue;
- }
-
- @@ -915,10 +922,12 @@ void exit_pi_state_list(struct task_struct *curr)
- pi_state->owner = NULL;
- raw_spin_unlock_irq(&curr->pi_lock);
-
- - rt_mutex_unlock(&pi_state->pi_mutex);
- -
- + get_pi_state(pi_state);
- spin_unlock(&hb->lock);
-
- + rt_mutex_futex_unlock(&pi_state->pi_mutex);
- + put_pi_state(pi_state);
- +
- raw_spin_lock_irq(&curr->pi_lock);
- }
- raw_spin_unlock_irq(&curr->pi_lock);
- @@ -972,6 +981,39 @@ void exit_pi_state_list(struct task_struct *curr)
- *
- * [10] There is no transient state which leaves owner and user space
- * TID out of sync.
- + *
- + *
- + * Serialization and lifetime rules:
- + *
- + * hb->lock:
- + *
- + * hb -> futex_q, relation
- + * futex_q -> pi_state, relation
- + *
- + *	(cannot be raw because hb can contain an arbitrary
- + *	number of futex_q's)
- + *
- + * pi_mutex->wait_lock:
- + *
- + * {uval, pi_state}
- + *
- + * (and pi_mutex 'obviously')
- + *
- + * p->pi_lock:
- + *
- + * p->pi_state_list -> pi_state->list, relation
- + *
- + * pi_state->refcount:
- + *
- + * pi_state lifetime
- + *
- + *
- + * Lock order:
- + *
- + * hb->lock
- + * pi_mutex->wait_lock
- + * p->pi_lock
- + *
- */
-
- /*
- @@ -979,10 +1021,13 @@ void exit_pi_state_list(struct task_struct *curr)
- * the pi_state against the user space value. If correct, attach to
- * it.
- */
- -static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
- +static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
- + struct futex_pi_state *pi_state,
- struct futex_pi_state **ps)
- {
- pid_t pid = uval & FUTEX_TID_MASK;
- + u32 uval2;
- + int ret;
-
- /*
- * Userspace might have messed up non-PI and PI futexes [3]
- @@ -990,8 +1035,38 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
- if (unlikely(!pi_state))
- return -EINVAL;
-
- + /*
- + * We get here with hb->lock held, and having found a
- + * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
- + * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
- + * which in turn means that futex_lock_pi() still has a reference on
- + * our pi_state.
- + *
- + * The waiter holding a reference on @pi_state also protects against
- + * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
- + * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
- + * free pi_state before we can take a reference ourselves.
- + */
- WARN_ON(!atomic_read(&pi_state->refcount));
-
- + /*
- + * Now that we have a pi_state, we can acquire wait_lock
- + * and do the state validation.
- + */
- + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- +
- + /*
- + * Since {uval, pi_state} is serialized by wait_lock, and our current
- + * uval was read without holding it, it can have changed. Verify it
- + * still is what we expect it to be, otherwise retry the entire
- + * operation.
- + */
- + if (get_futex_value_locked(&uval2, uaddr))
- + goto out_efault;
- +
- + if (uval != uval2)
- + goto out_eagain;
- +
- /*
- * Handle the owner died case:
- */
- @@ -1007,11 +1082,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
- * is not 0. Inconsistent state. [5]
- */
- if (pid)
- - return -EINVAL;
- + goto out_einval;
- /*
- * Take a ref on the state and return success. [4]
- */
- - goto out_state;
- + goto out_attach;
- }
-
- /*
- @@ -1023,14 +1098,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
- * Take a ref on the state and return success. [6]
- */
- if (!pid)
- - goto out_state;
- + goto out_attach;
- } else {
- /*
- * If the owner died bit is not set, then the pi_state
- * must have an owner. [7]
- */
- if (!pi_state->owner)
- - return -EINVAL;
- + goto out_einval;
- }
-
- /*
- @@ -1039,11 +1114,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
- * user space TID. [9/10]
- */
- if (pid != task_pid_vnr(pi_state->owner))
- - return -EINVAL;
- -out_state:
- - atomic_inc(&pi_state->refcount);
- + goto out_einval;
- +
- +out_attach:
- + get_pi_state(pi_state);
- + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- *ps = pi_state;
- return 0;
- +
- +out_einval:
- + ret = -EINVAL;
- + goto out_error;
- +
- +out_eagain:
- + ret = -EAGAIN;
- + goto out_error;
- +
- +out_efault:
- + ret = -EFAULT;
- + goto out_error;
- +
- +out_error:
- + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- + return ret;
- }
-
- /*
- @@ -1094,6 +1187,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
-
- /*
- * No existing pi state. First waiter. [2]
- + *
- + * This creates pi_state, we have hb->lock held, this means nothing can
- + * observe this state, wait_lock is irrelevant.
- */
- pi_state = alloc_pi_state();
-
- @@ -1118,17 +1214,18 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
- return 0;
- }
-
- -static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
- +static int lookup_pi_state(u32 __user *uaddr, u32 uval,
- + struct futex_hash_bucket *hb,
- union futex_key *key, struct futex_pi_state **ps)
- {
- - struct futex_q *match = futex_top_waiter(hb, key);
- + struct futex_q *top_waiter = futex_top_waiter(hb, key);
-
- /*
- * If there is a waiter on that futex, validate it and
- * attach to the pi_state when the validation succeeds.
- */
- - if (match)
- - return attach_to_pi_state(uval, match->pi_state, ps);
- + if (top_waiter)
- + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
-
- /*
- * We are the first waiter - try to look up the owner based on
- @@ -1147,7 +1244,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
- if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
- return -EFAULT;
-
- - /*If user space value changed, let the caller retry */
- + /* If user space value changed, let the caller retry */
- return curval != uval ? -EAGAIN : 0;
- }
-
- @@ -1175,7 +1272,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
- struct task_struct *task, int set_waiters)
- {
- u32 uval, newval, vpid = task_pid_vnr(task);
- - struct futex_q *match;
- + struct futex_q *top_waiter;
- int ret;
-
- /*
- @@ -1201,9 +1298,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
- * Lookup existing state first. If it exists, try to attach to
- * its pi_state.
- */
- - match = futex_top_waiter(hb, key);
- - if (match)
- - return attach_to_pi_state(uval, match->pi_state, ps);
- + top_waiter = futex_top_waiter(hb, key);
- + if (top_waiter)
- + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
-
- /*
- * No waiter and user TID is 0. We are here because the
- @@ -1284,50 +1381,45 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
- wake_q_add(wake_q, p);
- __unqueue_futex(q);
- /*
- - * The waiting task can free the futex_q as soon as
- - * q->lock_ptr = NULL is written, without taking any locks. A
- - * memory barrier is required here to prevent the following
- - * store to lock_ptr from getting ahead of the plist_del.
- + * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
- + * is written, without taking any locks. This is possible in the event
- + * of a spurious wakeup, for example. A memory barrier is required here
- + * to prevent the following store to lock_ptr from getting ahead of the
- + * plist_del in __unqueue_futex().
- */
- - smp_wmb();
- - q->lock_ptr = NULL;
- + smp_store_release(&q->lock_ptr, NULL);
- }
-
- -static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
- - struct futex_hash_bucket *hb)
- +/*
- + * Caller must hold a reference on @pi_state.
- + */
- +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
- {
- - struct task_struct *new_owner;
- - struct futex_pi_state *pi_state = this->pi_state;
- u32 uninitialized_var(curval), newval;
- + struct task_struct *new_owner;
- + bool postunlock = false;
- WAKE_Q(wake_q);
- - bool deboost;
- + WAKE_Q(wake_sleeper_q);
- int ret = 0;
-
- - if (!pi_state)
- - return -EINVAL;
- -
- - /*
- - * If current does not own the pi_state then the futex is
- - * inconsistent and user space fiddled with the futex value.
- - */
- - if (pi_state->owner != current)
- - return -EINVAL;
- -
- - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
- + if (WARN_ON_ONCE(!new_owner)) {
- + /*
- + * As per the comment in futex_unlock_pi() this should not happen.
- + *
- + * When this happens, give up our locks and try again, giving
- + * the futex_lock_pi() instance time to complete, either by
- + * waiting on the rtmutex or removing itself from the futex
- + * queue.
- + */
- + ret = -EAGAIN;
- + goto out_unlock;
- + }
-
- /*
- - * It is possible that the next waiter (the one that brought
- - * this owner to the kernel) timed out and is no longer
- - * waiting on the lock.
- - */
- - if (!new_owner)
- - new_owner = this->task;
- -
- - /*
- - * We pass it to the next owner. The WAITERS bit is always
- - * kept enabled while there is PI state around. We cleanup the
- - * owner died bit, because we are the owner.
- + * We pass it to the next owner. The WAITERS bit is always kept
- + * enabled while there is PI state around. We cleanup the owner
- + * died bit, because we are the owner.
- */
- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
-
- @@ -1336,6 +1428,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
-
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
- ret = -EFAULT;
- +
- } else if (curval != uval) {
- /*
- 		 * If an unconditional UNLOCK_PI operation (user space did not
- @@ -1348,10 +1441,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
- else
- ret = -EINVAL;
- }
- - if (ret) {
- - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- - return ret;
- - }
- +
- + if (ret)
- + goto out_unlock;
- +
- + /*
- + * This is a point of no return; once we modify the uval there is no
- + * going back and subsequent operations must not fail.
- + */
-
- raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- @@ -1364,22 +1461,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
- pi_state->owner = new_owner;
- raw_spin_unlock(&new_owner->pi_lock);
-
- + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
- + &wake_sleeper_q);
- +out_unlock:
- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
- - deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
- + if (postunlock)
- + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-
- - /*
- - * First unlock HB so the waiter does not spin on it once he got woken
- - * up. Second wake up the waiter before the priority is adjusted. If we
- - * deboost first (and lose our higher priority), then the task might get
- - * scheduled away before the wake up can take place.
- - */
- - spin_unlock(&hb->lock);
- - wake_up_q(&wake_q);
- - if (deboost)
- - rt_mutex_adjust_prio(current);
- -
- - return 0;
- + return ret;
- }
-
- /*
- @@ -1825,7 +1915,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
- * If that call succeeds then we have pi_state and an
- * initial refcount on it.
- */
- - ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
- + ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
- }
-
- switch (ret) {
- @@ -1908,7 +1998,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
- * refcount on the pi_state and store the pointer in
- * the futex_q object of the waiter.
- */
- - atomic_inc(&pi_state->refcount);
- + get_pi_state(pi_state);
- this->pi_state = pi_state;
- ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
- this->rt_waiter,
- @@ -1925,6 +2015,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
- requeue_pi_wake_futex(this, &key2, hb2);
- drop_count++;
- continue;
- + } else if (ret == -EAGAIN) {
- + /*
- + * Waiter was woken by timeout or
- + * signal and has set pi_blocked_on to
- + * PI_WAKEUP_INPROGRESS before we
- + * tried to enqueue it on the rtmutex.
- + */
- + this->pi_state = NULL;
- + put_pi_state(pi_state);
- + continue;
- } else if (ret) {
- /*
- * rt_mutex_start_proxy_lock() detected a
- @@ -2008,20 +2108,7 @@ queue_unlock(struct futex_hash_bucket *hb)
- hb_waiters_dec(hb);
- }
-
- -/**
- - * queue_me() - Enqueue the futex_q on the futex_hash_bucket
- - * @q: The futex_q to enqueue
- - * @hb: The destination hash bucket
- - *
- - * The hb->lock must be held by the caller, and is released here. A call to
- - * queue_me() is typically paired with exactly one call to unqueue_me(). The
- - * exceptions involve the PI related operations, which may use unqueue_me_pi()
- - * or nothing if the unqueue is done as part of the wake process and the unqueue
- - * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
- - * an example).
- - */
- -static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
- - __releases(&hb->lock)
- +static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
- {
- int prio;
-
- @@ -2038,6 +2125,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
- plist_node_init(&q->list, prio);
- plist_add(&q->list, &hb->chain);
- q->task = current;
- +}
- +
- +/**
- + * queue_me() - Enqueue the futex_q on the futex_hash_bucket
- + * @q: The futex_q to enqueue
- + * @hb: The destination hash bucket
- + *
- + * The hb->lock must be held by the caller, and is released here. A call to
- + * queue_me() is typically paired with exactly one call to unqueue_me(). The
- + * exceptions involve the PI related operations, which may use unqueue_me_pi()
- + * or nothing if the unqueue is done as part of the wake process and the unqueue
- + * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
- + * an example).
- + */
- +static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
- + __releases(&hb->lock)
- +{
- + __queue_me(q, hb);
- spin_unlock(&hb->lock);
- }
-
- @@ -2124,10 +2229,13 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- {
- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
- struct futex_pi_state *pi_state = q->pi_state;
- - struct task_struct *oldowner = pi_state->owner;
- u32 uval, uninitialized_var(curval), newval;
- + struct task_struct *oldowner;
- int ret;
-
- + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- +
- + oldowner = pi_state->owner;
- /* Owner died? */
- if (!pi_state->owner)
- newtid |= FUTEX_OWNER_DIED;
- @@ -2135,7 +2243,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- /*
- * We are here either because we stole the rtmutex from the
- * previous highest priority waiter or we are the highest priority
- - * waiter but failed to get the rtmutex the first time.
- + * waiter but have failed to get the rtmutex the first time.
- + *
- * We have to replace the newowner TID in the user space variable.
- * This must be atomic as we have to preserve the owner died bit here.
- *
- @@ -2143,17 +2252,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- * because we can fault here. Imagine swapped out pages or a fork
- * that marked all the anonymous memory readonly for cow.
- *
- - * Modifying pi_state _before_ the user space value would
- - * leave the pi_state in an inconsistent state when we fault
- - * here, because we need to drop the hash bucket lock to
- - * handle the fault. This might be observed in the PID check
- - * in lookup_pi_state.
- + * Modifying pi_state _before_ the user space value would leave the
- + * pi_state in an inconsistent state when we fault here, because we
- + * need to drop the locks to handle the fault. This might be observed
- + * in the PID check in lookup_pi_state.
- */
- retry:
- if (get_futex_value_locked(&uval, uaddr))
- goto handle_fault;
-
- - while (1) {
- + for (;;) {
- newval = (uval & FUTEX_OWNER_DIED) | newtid;
-
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
- @@ -2168,47 +2276,60 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- * itself.
- */
- if (pi_state->owner != NULL) {
- - raw_spin_lock_irq(&pi_state->owner->pi_lock);
- + raw_spin_lock(&pi_state->owner->pi_lock);
- WARN_ON(list_empty(&pi_state->list));
- list_del_init(&pi_state->list);
- - raw_spin_unlock_irq(&pi_state->owner->pi_lock);
- + raw_spin_unlock(&pi_state->owner->pi_lock);
- }
-
- pi_state->owner = newowner;
-
- - raw_spin_lock_irq(&newowner->pi_lock);
- + raw_spin_lock(&newowner->pi_lock);
- WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &newowner->pi_state_list);
- - raw_spin_unlock_irq(&newowner->pi_lock);
- + raw_spin_unlock(&newowner->pi_lock);
- + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- +
- return 0;
-
- /*
- - * To handle the page fault we need to drop the hash bucket
- - * lock here. That gives the other task (either the highest priority
- - * waiter itself or the task which stole the rtmutex) the
- - * chance to try the fixup of the pi_state. So once we are
- - * back from handling the fault we need to check the pi_state
- - * after reacquiring the hash bucket lock and before trying to
- - * do another fixup. When the fixup has been done already we
- - * simply return.
- + * To handle the page fault we need to drop the locks here. That gives
- + * the other task (either the highest priority waiter itself or the
- + * task which stole the rtmutex) the chance to try the fixup of the
- + * pi_state. So once we are back from handling the fault we need to
- + * check the pi_state after reacquiring the locks and before trying to
- + * do another fixup. When the fixup has been done already we simply
- + * return.
- + *
- + * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
- + * drop hb->lock since the caller owns the hb -> futex_q relation.
- +	 * Dropping the pi_mutex->wait_lock requires revalidating the state.
- */
- handle_fault:
- + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- spin_unlock(q->lock_ptr);
-
- ret = fault_in_user_writeable(uaddr);
-
- spin_lock(q->lock_ptr);
- + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
-
- /*
- * Check if someone else fixed it for us:
- */
- - if (pi_state->owner != oldowner)
- - return 0;
- + if (pi_state->owner != oldowner) {
- + ret = 0;
- + goto out_unlock;
- + }
-
- if (ret)
- - return ret;
- + goto out_unlock;
-
- goto retry;
- +
- +out_unlock:
- + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- + return ret;
- }
-
- static long futex_wait_restart(struct restart_block *restart);
- @@ -2230,57 +2351,32 @@ static long futex_wait_restart(struct restart_block *restart);
- */
- static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
- {
- - struct task_struct *owner;
- int ret = 0;
-
- if (locked) {
- /*
- * Got the lock. We might not be the anticipated owner if we
- * did a lock-steal - fix up the PI-state in that case:
- + *
- + * We can safely read pi_state->owner without holding wait_lock
- + * because we now own the rt_mutex, only the owner will attempt
- + * to change it.
- */
- if (q->pi_state->owner != current)
- ret = fixup_pi_state_owner(uaddr, q, current);
- goto out;
- }
-
- - /*
- - * Catch the rare case, where the lock was released when we were on the
- - * way back before we locked the hash bucket.
- - */
- - if (q->pi_state->owner == current) {
- - /*
- - * Try to get the rt_mutex now. This might fail as some other
- - * task acquired the rt_mutex after we removed ourself from the
- - * rt_mutex waiters list.
- - */
- - if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
- - locked = 1;
- - goto out;
- - }
- -
- - /*
- - * pi_state is incorrect, some other task did a lock steal and
- - * we returned due to timeout or signal without taking the
- - * rt_mutex. Too late.
- - */
- - raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
- - owner = rt_mutex_owner(&q->pi_state->pi_mutex);
- - if (!owner)
- - owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
- - raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
- - ret = fixup_pi_state_owner(uaddr, q, owner);
- - goto out;
- - }
- -
- /*
- * Paranoia check. If we did not take the lock, then we should not be
- * the owner of the rt_mutex.
- */
- - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
- + if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
- "pi-state %p\n", ret,
- q->pi_state->pi_mutex.owner,
- q->pi_state->owner);
- + }
-
- out:
- return ret ? ret : locked;
- @@ -2504,6 +2600,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
- ktime_t *time, int trylock)
- {
- struct hrtimer_sleeper timeout, *to = NULL;
- + struct futex_pi_state *pi_state = NULL;
- + struct rt_mutex_waiter rt_waiter;
- struct futex_hash_bucket *hb;
- struct futex_q q = futex_q_init;
- int res, ret;
- @@ -2556,24 +2654,76 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
- }
- }
-
- + WARN_ON(!q.pi_state);
- +
- /*
- * Only actually queue now that the atomic ops are done:
- */
- - queue_me(&q, hb);
- + __queue_me(&q, hb);
-
- - WARN_ON(!q.pi_state);
- - /*
- - * Block on the PI mutex:
- - */
- - if (!trylock) {
- - ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
- - } else {
- - ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
- + if (trylock) {
- + ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
- /* Fixup the trylock return value: */
- ret = ret ? 0 : -EWOULDBLOCK;
- + goto no_block;
- + }
- +
- + rt_mutex_init_waiter(&rt_waiter, false);
- +
- + /*
- + * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
- +	 * hold it while doing rt_mutex_start_proxy_lock(), because then it will
- +	 * include hb->lock in the blocking chain, even though we will not in
- +	 * fact hold it while blocking. This will lead it to report -EDEADLK
- + * and BUG when futex_unlock_pi() interleaves with this.
- + *
- + * Therefore acquire wait_lock while holding hb->lock, but drop the
- + * latter before calling rt_mutex_start_proxy_lock(). This still fully
- + * serializes against futex_unlock_pi() as that does the exact same
- + * lock handoff sequence.
- + */
- + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
- + /*
- +	 * The migrate_disable() here disables migration in the in_atomic() fast
- + * path which is enabled again in the following spin_unlock(). We have
- + * one migrate_disable() pending in the slow-path which is reversed
- + * after the raw_spin_unlock_irq() where we leave the atomic context.
- + */
- + migrate_disable();
- +
- + spin_unlock(q.lock_ptr);
- + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
- + raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
- + migrate_enable();
- +
- + if (ret) {
- + if (ret == 1)
- + ret = 0;
- +
- + spin_lock(q.lock_ptr);
- + goto no_block;
- }
-
- +
- + if (unlikely(to))
- + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
- +
- + ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
- +
- spin_lock(q.lock_ptr);
- + /*
- + * If we failed to acquire the lock (signal/timeout), we must
- + * first acquire the hb->lock before removing the lock from the
- + * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
- + * wait lists consistent.
- + *
- +	 * In particular, it is important that futex_unlock_pi() cannot
- + * observe this inconsistency.
- + */
- + if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
- + ret = 0;
- +
- +no_block:
- /*
- * Fixup the pi_state owner and possibly acquire the lock if we
- * haven't already.
- @@ -2590,12 +2740,19 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
- * If fixup_owner() faulted and was unable to handle the fault, unlock
- * it and return the fault to userspace.
- */
- - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
- - rt_mutex_unlock(&q.pi_state->pi_mutex);
- + if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
- + pi_state = q.pi_state;
- + get_pi_state(pi_state);
- + }
-
- /* Unqueue and drop the lock */
- unqueue_me_pi(&q);
-
- + if (pi_state) {
- + rt_mutex_futex_unlock(&pi_state->pi_mutex);
- + put_pi_state(pi_state);
- + }
- +
- goto out_put_key;
-
- out_unlock_put_key:
- @@ -2604,8 +2761,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
- out_put_key:
- put_futex_key(&q.key);
- out:
- - if (to)
- + if (to) {
- + hrtimer_cancel(&to->timer);
- destroy_hrtimer_on_stack(&to->timer);
- + }
- return ret != -EINTR ? ret : -ERESTARTNOINTR;
-
- uaddr_faulted:
- @@ -2632,7 +2791,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
- u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
- union futex_key key = FUTEX_KEY_INIT;
- struct futex_hash_bucket *hb;
- - struct futex_q *match;
- + struct futex_q *top_waiter;
- int ret;
-
- retry:
- @@ -2656,12 +2815,48 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
- * all and we at least want to know if user space fiddled
- * with the futex value instead of blindly unlocking.
- */
- - match = futex_top_waiter(hb, &key);
- - if (match) {
- - ret = wake_futex_pi(uaddr, uval, match, hb);
- + top_waiter = futex_top_waiter(hb, &key);
- + if (top_waiter) {
- + struct futex_pi_state *pi_state = top_waiter->pi_state;
- +
- + ret = -EINVAL;
- + if (!pi_state)
- + goto out_unlock;
- +
- + /*
- + * If current does not own the pi_state then the futex is
- + * inconsistent and user space fiddled with the futex value.
- + */
- + if (pi_state->owner != current)
- + goto out_unlock;
- +
- + get_pi_state(pi_state);
- + /*
- + * By taking wait_lock while still holding hb->lock, we ensure
- + * there is no point where we hold neither; and therefore
- + * wake_futex_pi() must observe a state consistent with what we
- + * observed.
- + */
- + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
- + /*
- + * Magic trickery for now to make the RT migrate disable
- + * logic happy. The following spin_unlock() happens with
- + * interrupts disabled so the internal migrate_enable()
- + * won't undo the migrate_disable() which was issued when
- + * locking hb->lock.
- + */
- + migrate_disable();
- + spin_unlock(&hb->lock);
- +
- + /* Drops pi_state->pi_mutex.wait_lock */
- + ret = wake_futex_pi(uaddr, uval, pi_state);
- +
- + migrate_enable();
- +
- + put_pi_state(pi_state);
- +
- /*
- - * In case of success wake_futex_pi dropped the hash
- - * bucket lock.
- + * Success, we're done! No tricky corner cases.
- */
- if (!ret)
- goto out_putkey;
- @@ -2676,7 +2871,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
- * setting the FUTEX_WAITERS bit. Try again.
- */
- if (ret == -EAGAIN) {
- - spin_unlock(&hb->lock);
- put_futex_key(&key);
- goto retry;
- }
- @@ -2684,7 +2878,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
- * wake_futex_pi has detected invalid state. Tell user
- * space.
- */
- - goto out_unlock;
- + goto out_putkey;
- }
-
- /*
- @@ -2694,8 +2888,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
- * preserve the WAITERS bit not the OWNER_DIED one. We are the
- * owner.
- */
- - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
- + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
- + spin_unlock(&hb->lock);
- goto pi_faulted;
- + }
-
- /*
- * If uval has changed, let user space handle it.
- @@ -2709,7 +2905,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
- return ret;
-
- pi_faulted:
- - spin_unlock(&hb->lock);
- put_futex_key(&key);
-
- ret = fault_in_user_writeable(uaddr);
- @@ -2813,8 +3008,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- u32 __user *uaddr2)
- {
- struct hrtimer_sleeper timeout, *to = NULL;
- + struct futex_pi_state *pi_state = NULL;
- struct rt_mutex_waiter rt_waiter;
- - struct futex_hash_bucket *hb;
- + struct futex_hash_bucket *hb, *hb2;
- union futex_key key2 = FUTEX_KEY_INIT;
- struct futex_q q = futex_q_init;
- int res, ret;
- @@ -2839,10 +3035,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- * The waiter is allocated on our stack, manipulated by the requeue
- * code while we sleep on uaddr.
- */
- - debug_rt_mutex_init_waiter(&rt_waiter);
- - RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
- - RB_CLEAR_NODE(&rt_waiter.tree_entry);
- - rt_waiter.task = NULL;
- + rt_mutex_init_waiter(&rt_waiter, false);
-
- ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
- if (unlikely(ret != 0))
- @@ -2873,20 +3066,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- /* Queue the futex_q, drop the hb lock, wait for wakeup. */
- futex_wait_queue_me(hb, &q, to);
-
- - spin_lock(&hb->lock);
- - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
- - spin_unlock(&hb->lock);
- - if (ret)
- - goto out_put_keys;
- + /*
- + * On RT we must avoid races with requeue and trying to block
- + * on two mutexes (hb->lock and uaddr2's rtmutex) by
- + * serializing access to pi_blocked_on with pi_lock.
- + */
- +	raw_spin_lock_irq(&current->pi_lock);
- + if (current->pi_blocked_on) {
- + /*
- + * We have been requeued or are in the process of
- + * being requeued.
- + */
- +		raw_spin_unlock_irq(&current->pi_lock);
- + } else {
- + /*
- + * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
- + * prevents a concurrent requeue from moving us to the
- + * uaddr2 rtmutex. After that we can safely acquire
- + * (and possibly block on) hb->lock.
- + */
- + current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
- +		raw_spin_unlock_irq(&current->pi_lock);
- +
- + spin_lock(&hb->lock);
- +
- + /*
- +		 * Clean up pi_blocked_on. We might otherwise leak
- +		 * it when we took hb->lock on the fast path.
- + */
- +		raw_spin_lock_irq(&current->pi_lock);
- +		current->pi_blocked_on = NULL;
- +		raw_spin_unlock_irq(&current->pi_lock);
- +
- + ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
- + spin_unlock(&hb->lock);
- + if (ret)
- + goto out_put_keys;
- + }
-
- /*
- - * In order for us to be here, we know our q.key == key2, and since
- - * we took the hb->lock above, we also know that futex_requeue() has
- - * completed and we no longer have to concern ourselves with a wakeup
- - * race with the atomic proxy lock acquisition by the requeue code. The
- - * futex_requeue dropped our key1 reference and incremented our key2
- - * reference count.
- + * In order to be here, we have either been requeued, are in
- + * the process of being requeued, or requeue successfully
- + * acquired uaddr2 on our behalf. If pi_blocked_on was
- + * non-null above, we may be racing with a requeue. Do not
- + * rely on q->lock_ptr to be hb2->lock until after blocking on
- + * hb->lock or hb2->lock. The futex_requeue dropped our key1
- + * reference and incremented our key2 reference count.
- */
- + hb2 = hash_futex(&key2);
-
- /* Check if the requeue code acquired the second futex for us. */
- if (!q.rt_waiter) {
- @@ -2895,16 +3123,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- * did a lock-steal - fix up the PI-state in that case.
- */
- if (q.pi_state && (q.pi_state->owner != current)) {
- - spin_lock(q.lock_ptr);
- + spin_lock(&hb2->lock);
- + BUG_ON(&hb2->lock != q.lock_ptr);
- ret = fixup_pi_state_owner(uaddr2, &q, current);
- - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
- - rt_mutex_unlock(&q.pi_state->pi_mutex);
- + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- + pi_state = q.pi_state;
- + get_pi_state(pi_state);
- + }
- /*
- * Drop the reference to the pi state which
- * the requeue_pi() code acquired for us.
- */
- put_pi_state(q.pi_state);
- - spin_unlock(q.lock_ptr);
- + spin_unlock(&hb2->lock);
- }
- } else {
- struct rt_mutex *pi_mutex;
- @@ -2916,10 +3147,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- */
- WARN_ON(!q.pi_state);
- pi_mutex = &q.pi_state->pi_mutex;
- - ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
- - debug_rt_mutex_free_waiter(&rt_waiter);
- + ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
-
- - spin_lock(q.lock_ptr);
- + spin_lock(&hb2->lock);
- + BUG_ON(&hb2->lock != q.lock_ptr);
- + if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
- + ret = 0;
- +
- + debug_rt_mutex_free_waiter(&rt_waiter);
- /*
- * Fixup the pi_state owner and possibly acquire the lock if we
- * haven't already.
- @@ -2937,13 +3172,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- * the fault, unlock the rt_mutex and return the fault to
- * userspace.
- */
- - if (ret && rt_mutex_owner(pi_mutex) == current)
- - rt_mutex_unlock(pi_mutex);
- + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
- + pi_state = q.pi_state;
- + get_pi_state(pi_state);
- + }
-
- /* Unqueue and drop the lock. */
- unqueue_me_pi(&q);
- }
-
- + if (pi_state) {
- + rt_mutex_futex_unlock(&pi_state->pi_mutex);
- + put_pi_state(pi_state);
- + }
- +
- if (ret == -EINTR) {
- /*
- * We've already been requeued, but cannot restart by calling
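-
- A recurring idiom throughout the futex rework above: pin pi_state with get_pi_state() while still holding hb->lock, drop the lock, operate on the object (possibly blocking on the rtmutex), then put_pi_state(). The same shape, written generically with kref; struct obj, obj_release() and operate_unlocked() are hypothetical names:
-
- 	#include <linux/kref.h>
- 	#include <linux/slab.h>
- 	#include <linux/spinlock.h>
-
- 	struct obj {
- 		struct kref ref;
- 	};
-
- 	static void obj_release(struct kref *ref)
- 	{
- 		kfree(container_of(ref, struct obj, ref));
- 	}
-
- 	/* @lock is what keeps @o visible to lookups; pin before dropping. */
- 	static void operate_unlocked(struct obj *o, spinlock_t *lock)
- 	{
- 		kref_get(&o->ref);
- 		spin_unlock(lock);
-
- 		/* ... work that may sleep or take other locks ... */
-
- 		kref_put(&o->ref, obj_release);	/* may free @o */
- 	}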
- diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
- index d3f24905852c..f87aa8fdcc51 100644
- --- a/kernel/irq/handle.c
- +++ b/kernel/irq/handle.c
- @@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
- {
- irqreturn_t retval;
- unsigned int flags = 0;
- + struct pt_regs *regs = get_irq_regs();
- + u64 ip = regs ? instruction_pointer(regs) : 0;
-
- retval = __handle_irq_event_percpu(desc, &flags);
-
- - add_interrupt_randomness(desc->irq_data.irq, flags);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + desc->random_ip = ip;
- +#else
- + add_interrupt_randomness(desc->irq_data.irq, flags, ip);
- +#endif
-
- if (!noirqdebug)
- note_interrupt(desc, retval);
- diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
- index ea41820ab12e..5994867526f3 100644
- --- a/kernel/irq/manage.c
- +++ b/kernel/irq/manage.c
- @@ -22,6 +22,7 @@
- #include "internals.h"
-
- #ifdef CONFIG_IRQ_FORCED_THREADING
- +# ifndef CONFIG_PREEMPT_RT_BASE
- __read_mostly bool force_irqthreads;
-
- static int __init setup_forced_irqthreads(char *arg)
- @@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg)
- return 0;
- }
- early_param("threadirqs", setup_forced_irqthreads);
- +# endif
- #endif
-
- static void __synchronize_hardirq(struct irq_desc *desc)
- @@ -233,7 +235,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
-
- if (desc->affinity_notify) {
- kref_get(&desc->affinity_notify->kref);
- +
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + swork_queue(&desc->affinity_notify->swork);
- +#else
- schedule_work(&desc->affinity_notify->work);
- +#endif
- }
- irqd_set(data, IRQD_AFFINITY_SET);
-
- @@ -271,10 +278,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
- }
- EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-
- -static void irq_affinity_notify(struct work_struct *work)
- +static void _irq_affinity_notify(struct irq_affinity_notify *notify)
- {
- - struct irq_affinity_notify *notify =
- - container_of(work, struct irq_affinity_notify, work);
- struct irq_desc *desc = irq_to_desc(notify->irq);
- cpumask_var_t cpumask;
- unsigned long flags;
- @@ -296,6 +301,35 @@ static void irq_affinity_notify(struct work_struct *work)
- 	kref_put(&notify->kref, notify->release);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +static void init_helper_thread(void)
- +{
- + static int init_sworker_once;
- +
- + if (init_sworker_once)
- + return;
- + if (WARN_ON(swork_get()))
- + return;
- + init_sworker_once = 1;
- +}
- +
- +static void irq_affinity_notify(struct swork_event *swork)
- +{
- + struct irq_affinity_notify *notify =
- + container_of(swork, struct irq_affinity_notify, swork);
- + _irq_affinity_notify(notify);
- +}
- +
- +#else
- +
- +static void irq_affinity_notify(struct work_struct *work)
- +{
- + struct irq_affinity_notify *notify =
- + container_of(work, struct irq_affinity_notify, work);
- + _irq_affinity_notify(notify);
- +}
- +#endif
- +
- /**
- * irq_set_affinity_notifier - control notification of IRQ affinity changes
- * @irq: Interrupt for which to enable/disable notification
- @@ -324,7 +358,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
- if (notify) {
- notify->irq = irq;
- 		kref_init(&notify->kref);
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +		INIT_SWORK(&notify->swork, irq_affinity_notify);
- +		init_helper_thread();
- +#else
- 		INIT_WORK(&notify->work, irq_affinity_notify);
- +#endif
- }
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- @@ -879,7 +918,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
- local_bh_disable();
- ret = action->thread_fn(action->irq, action->dev_id);
- irq_finalize_oneshot(desc, action);
- - local_bh_enable();
- + /*
- + * Interrupts which have real time requirements can be set up
- + * to avoid softirq processing in the thread handler. This is
- + * safe as these interrupts do not raise soft interrupts.
- + */
- + if (irq_settings_no_softirq_call(desc))
- + _local_bh_enable();
- + else
- + local_bh_enable();
- return ret;
- }
-
- @@ -976,6 +1023,12 @@ static int irq_thread(void *data)
- if (action_ret == IRQ_WAKE_THREAD)
- irq_wake_secondary(desc, action);
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + migrate_disable();
- + add_interrupt_randomness(action->irq, 0,
- + desc->random_ip ^ (unsigned long) action);
- + migrate_enable();
- +#endif
- wake_threads_waitq(desc);
- }
-
- @@ -1338,6 +1391,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
- irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
- }
-
- + if (new->flags & IRQF_NO_SOFTIRQ_CALL)
- + irq_settings_set_no_softirq_call(desc);
- +
- /* Set default affinity mask once everything is setup */
- setup_affinity(desc, mask);
-
- @@ -2063,7 +2119,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
- * This call sets the internal irqchip state of an interrupt,
- * depending on the value of @which.
- *
- - * This function should be called with preemption disabled if the
- + * This function should be called with migration disabled if the
- * interrupt controller has per-cpu registers.
- */
- int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
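-
- A driver opts into the _local_bh_enable() path above with IRQF_NO_SOFTIRQ_CALL, a flag that exists only in RT-patched trees and is only safe when the handler genuinely raises no softirqs. A sketch of such a request; demo_thread_fn and demo_setup are hypothetical:
-
- 	#include <linux/interrupt.h>
-
- 	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
- 	{
- 		/* Must not raise softirqs: none are processed on return. */
- 		return IRQ_HANDLED;
- 	}
-
- 	static int demo_setup(unsigned int irq, void *dev)
- 	{
- 		return request_threaded_irq(irq, NULL, demo_thread_fn,
- 					    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
- 					    "demo", dev);
- 	}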
- diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
- index 320579d89091..2df2d4445b1e 100644
- --- a/kernel/irq/settings.h
- +++ b/kernel/irq/settings.h
- @@ -16,6 +16,7 @@ enum {
- _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
- _IRQ_IS_POLLED = IRQ_IS_POLLED,
- _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
- + _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
- _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
- };
-
- @@ -30,6 +31,7 @@ enum {
- #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
- #define IRQ_IS_POLLED GOT_YOU_MORON
- #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
- +#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
- #undef IRQF_MODIFY_MASK
- #define IRQF_MODIFY_MASK GOT_YOU_MORON
-
- @@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
- desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
- }
-
- +static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
- +{
- + return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
- +}
- +
- +static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
- +{
- + desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
- +}
- +
- static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
- {
- return desc->status_use_accessors & _IRQ_PER_CPU;
- diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
- index 5707f97a3e6a..73f38dc7a7fb 100644
- --- a/kernel/irq/spurious.c
- +++ b/kernel/irq/spurious.c
- @@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
-
- static int __init irqfixup_setup(char *str)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
- + return 1;
- +#endif
- irqfixup = 1;
- printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
- printk(KERN_WARNING "This may impact system performance.\n");
- @@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
-
- static int __init irqpoll_setup(char *str)
- {
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
- + return 1;
- +#endif
- irqfixup = 2;
- printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
- "enabled\n");
- diff --git a/kernel/irq_work.c b/kernel/irq_work.c
- index bcf107ce0854..2899ba0d23d1 100644
- --- a/kernel/irq_work.c
- +++ b/kernel/irq_work.c
- @@ -17,6 +17,7 @@
- #include <linux/cpu.h>
- #include <linux/notifier.h>
- #include <linux/smp.h>
- +#include <linux/interrupt.h>
- #include <asm/processor.h>
-
-
- @@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
- */
- bool irq_work_queue_on(struct irq_work *work, int cpu)
- {
- + struct llist_head *list;
- +
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(cpu));
-
- @@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
- if (!irq_work_claim(work))
- return false;
-
- - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
- + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
- + list = &per_cpu(lazy_list, cpu);
- + else
- + list = &per_cpu(raised_list, cpu);
- +
- + if (llist_add(&work->llnode, list))
- arch_send_call_function_single_ipi(cpu);
-
- return true;
- @@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
- /* Enqueue the irq work @work on the current CPU */
- bool irq_work_queue(struct irq_work *work)
- {
- + struct llist_head *list;
- + bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
- +
- /* Only queue if not already pending */
- if (!irq_work_claim(work))
- return false;
- @@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work)
- /* Queue the entry and raise the IPI if needed. */
- preempt_disable();
-
- - /* If the work is "lazy", handle it from next tick if any */
- - if (work->flags & IRQ_WORK_LAZY) {
- - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- - tick_nohz_tick_stopped())
- - arch_irq_work_raise();
- - } else {
- - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
- + lazy_work = work->flags & IRQ_WORK_LAZY;
- +
- + if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
- + list = this_cpu_ptr(&lazy_list);
- + else
- + list = this_cpu_ptr(&raised_list);
- +
- + if (llist_add(&work->llnode, list)) {
- + if (!lazy_work || tick_nohz_tick_stopped())
- arch_irq_work_raise();
- }
-
- @@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
- raised = this_cpu_ptr(&raised_list);
- lazy = this_cpu_ptr(&lazy_list);
-
- - if (llist_empty(raised) || arch_irq_work_has_interrupt())
- - if (llist_empty(lazy))
- - return false;
- + if (llist_empty(raised) && llist_empty(lazy))
- + return false;
-
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
- @@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list)
- struct irq_work *work;
- struct llist_node *llnode;
-
- - BUG_ON(!irqs_disabled());
- + BUG_ON_NONRT(!irqs_disabled());
-
- if (llist_empty(list))
- return;
- @@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list)
- void irq_work_run(void)
- {
- irq_work_run_list(this_cpu_ptr(&raised_list));
- - irq_work_run_list(this_cpu_ptr(&lazy_list));
- + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
- + /*
- + * NOTE: we raise softirq via IPI for safety,
- + * and execute in irq_work_tick() to move the
- + * overhead from hard to soft irq context.
- + */
- + if (!llist_empty(this_cpu_ptr(&lazy_list)))
- + raise_softirq(TIMER_SOFTIRQ);
- + } else
- + irq_work_run_list(this_cpu_ptr(&lazy_list));
- }
- EXPORT_SYMBOL_GPL(irq_work_run);
-
- @@ -179,8 +200,17 @@ void irq_work_tick(void)
-
- if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
- irq_work_run_list(raised);
- +
- + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
- + irq_work_run_list(this_cpu_ptr(&lazy_list));
- +}
- +
- +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
- +void irq_work_tick_soft(void)
- +{
- irq_work_run_list(this_cpu_ptr(&lazy_list));
- }
- +#endif
-
- /*
- * Synchronize against the irq_work @entry, ensures the entry is not
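-
- Under the routing added above, an irq_work item runs from the lazy list (via TIMER_SOFTIRQ and irq_work_tick_soft()) on RT unless it carries IRQ_WORK_HARD_IRQ, the flag this series introduces for work that must stay in hard irq context. A sketch of pinning one item there; demo_work and demo_fn are hypothetical:
-
- 	#include <linux/irq_work.h>
-
- 	static void demo_fn(struct irq_work *work)
- 	{
- 		/* Stays on the raised list, so runs in hard irq even on RT. */
- 	}
-
- 	static struct irq_work demo_work = {
- 		.flags = IRQ_WORK_HARD_IRQ,
- 		.func  = demo_fn,
- 	};
-
- 	static void demo_poke(void)
- 	{
- 		irq_work_queue(&demo_work);
- 	}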
- diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
- index ee1bc1bb8feb..ddef07958840 100644
- --- a/kernel/ksysfs.c
- +++ b/kernel/ksysfs.c
- @@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
-
- #endif /* CONFIG_KEXEC_CORE */
-
- +#if defined(CONFIG_PREEMPT_RT_FULL)
- +static ssize_t realtime_show(struct kobject *kobj,
- + struct kobj_attribute *attr, char *buf)
- +{
- + return sprintf(buf, "%d\n", 1);
- +}
- +KERNEL_ATTR_RO(realtime);
- +#endif
- +
- /* whether file capabilities are enabled */
- static ssize_t fscaps_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
- @@ -224,6 +233,9 @@ static struct attribute * kernel_attrs[] = {
- #ifndef CONFIG_TINY_RCU
- &rcu_expedited_attr.attr,
- &rcu_normal_attr.attr,
- +#endif
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + &realtime_attr.attr,
- #endif
- NULL
- };
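-
- Userspace can key off the new attribute to detect an RT kernel: /sys/kernel/realtime exists, and reads "1", only when CONFIG_PREEMPT_RT_FULL is set. A small probe:
-
- 	#include <stdio.h>
-
- 	int main(void)
- 	{
- 		FILE *f = fopen("/sys/kernel/realtime", "r");
- 		int rt = 0;
-
- 		if (f) {
- 			if (fscanf(f, "%d", &rt) != 1)
- 				rt = 0;
- 			fclose(f);
- 		}
- 		printf(rt == 1 ? "PREEMPT_RT_FULL kernel\n"
- 			       : "not an RT kernel\n");
- 		return 0;
- 	}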
- diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
- index 6f88e352cd4f..6ff9e8011dd0 100644
- --- a/kernel/locking/Makefile
- +++ b/kernel/locking/Makefile
- @@ -2,7 +2,7 @@
- # and is generally not a function of system call inputs.
- KCOV_INSTRUMENT := n
-
- -obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
- +obj-y += semaphore.o percpu-rwsem.o
-
- ifdef CONFIG_FUNCTION_TRACER
- CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
- @@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
- CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
- endif
-
- +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
- +obj-y += mutex.o
- obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
- +endif
- +obj-y += rwsem.o
- obj-$(CONFIG_LOCKDEP) += lockdep.o
- ifeq ($(CONFIG_PROC_FS),y)
- obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
- @@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
- obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
- obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
- obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
- +ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
- obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
- obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
- +endif
- +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
- obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
- obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
- diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
- index 6599c7f3071d..79f8e00e802e 100644
- --- a/kernel/locking/lockdep.c
- +++ b/kernel/locking/lockdep.c
- @@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
- struct lockdep_subclass_key *key;
- struct hlist_head *hash_head;
- struct lock_class *class;
- + bool is_static = false;
-
- if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
- debug_locks_off();
- @@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
-
- /*
- * Static locks do not have their class-keys yet - for them the key
- - * is the lock object itself:
- + * is the lock object itself. If the lock is in the per cpu area,
- + * the canonical address of the lock (per cpu offset removed) is
- + * used.
- */
- - if (unlikely(!lock->key))
- - lock->key = (void *)lock;
- + if (unlikely(!lock->key)) {
- + unsigned long can_addr, addr = (unsigned long)lock;
- +
- + if (__is_kernel_percpu_address(addr, &can_addr))
- + lock->key = (void *)can_addr;
- + else if (__is_module_percpu_address(addr, &can_addr))
- + lock->key = (void *)can_addr;
- + else if (static_obj(lock))
- + lock->key = (void *)lock;
- + else
- + return ERR_PTR(-EINVAL);
- + is_static = true;
- + }
-
- /*
- * NOTE: the class-key must be unique. For dynamic locks, a static
- @@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
- }
- }
-
- - return NULL;
- + return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
- }
-
- /*
- @@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
-
- class = look_up_lock_class(lock, subclass);
- - if (likely(class))
- + if (likely(!IS_ERR_OR_NULL(class)))
- goto out_set_class_cache;
-
- /*
- * Debug-check: all keys must be persistent!
- - */
- - if (!static_obj(lock->key)) {
- + */
- + if (IS_ERR(class)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
- -
- return NULL;
- }
-
- @@ -3417,7 +3430,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
- * Clearly if the lock hasn't been acquired _ever_, we're not
- * holding it either, so report failure.
- */
- - if (!class)
- + if (IS_ERR_OR_NULL(class))
- return 0;
-
- /*
- @@ -3696,6 +3709,7 @@ static void check_flags(unsigned long flags)
- }
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * We dont accurately track softirq state in e.g.
- * hardirq contexts (such as on 4KSTACKS), so only
- @@ -3710,6 +3724,7 @@ static void check_flags(unsigned long flags)
- DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
- }
- }
- +#endif
-
- if (!debug_locks)
- print_irqtrace_events(current);
- @@ -4166,7 +4181,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
- * If the class exists we look it up and zap it:
- */
- class = look_up_lock_class(lock, j);
- - if (class)
- + if (!IS_ERR_OR_NULL(class))
- zap_class(class);
- }
- /*
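look_up_lock_class() now has three distinct outcomes instead of two: a valid
class, NULL for "no class registered yet" (the caller may register one), and
ERR_PTR(-EINVAL) for a non-static key that must never be registered. That is
why register_lock_class(), match_held_lock() and lockdep_reset_lock() above
switch from plain NULL checks to IS_ERR_OR_NULL(). A user-space model of the
tri-state convention, with simplified ERR_PTR() helpers:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }
    static int IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

    /* Toy lookup: invalid key -> error pointer, unknown key -> NULL. */
    static void *look_up(int key_is_static, int already_registered)
    {
            if (!key_is_static)
                    return ERR_PTR(-EINVAL);
            return already_registered ? (void *)"class" : NULL;
    }

    int main(void)
    {
            void *c = look_up(0, 0);

            if (IS_ERR_OR_NULL(c))
                    printf("no usable class, err=%ld\n",
                           IS_ERR(c) ? PTR_ERR(c) : 0L);
            return 0;
    }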
- diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
- index d3de04b12f8c..0f49abeae337 100644
- --- a/kernel/locking/locktorture.c
- +++ b/kernel/locking/locktorture.c
- @@ -26,7 +26,6 @@
- #include <linux/kthread.h>
- #include <linux/sched/rt.h>
- #include <linux/spinlock.h>
- -#include <linux/rwlock.h>
- #include <linux/mutex.h>
- #include <linux/rwsem.h>
- #include <linux/smp.h>
- diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
- index ce182599cf2e..2ad3a1e8344c 100644
- --- a/kernel/locking/percpu-rwsem.c
- +++ b/kernel/locking/percpu-rwsem.c
- @@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
- /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
- rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
- __init_rwsem(&sem->rw_sem, name, rwsem_key);
- - init_waitqueue_head(&sem->writer);
- + init_swait_queue_head(&sem->writer);
- sem->readers_block = 0;
- return 0;
- }
- @@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem)
- __this_cpu_dec(*sem->read_count);
-
- /* Prod writer to recheck readers_active */
- - wake_up(&sem->writer);
- + swake_up(&sem->writer);
- }
- EXPORT_SYMBOL_GPL(__percpu_up_read);
-
- @@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
- */
-
- /* Wait for all now active readers to complete. */
- - wait_event(sem->writer, readers_active_check(sem));
- + swait_event(sem->writer, readers_active_check(sem));
- }
- EXPORT_SYMBOL_GPL(percpu_down_write);
-
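The writer's wait queue moves from the full waitqueue API to a simple wait
queue (swait). Simple wait queues take only a raw spinlock internally, so
waking the writer stays safe in the atomic paths that RT otherwise converts
to sleeping locks. The wait/wake pattern, as a kernel-style fragment (not
stand-alone code; it assumes the swait API used above):

    static DECLARE_SWAIT_QUEUE_HEAD(wq);
    static int done;

    /* sleeping side, cf. percpu_down_write() above */
    swait_event(wq, READ_ONCE(done));

    /* waking side, cf. __percpu_up_read() above */
    WRITE_ONCE(done, 1);
    swake_up(&wq);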
- diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
- new file mode 100644
- index 000000000000..6284e3b15091
- --- /dev/null
- +++ b/kernel/locking/rt.c
- @@ -0,0 +1,331 @@
- +/*
- + * kernel/rt.c
- + *
- + * Real-Time Preemption Support
- + *
- + * started by Ingo Molnar:
- + *
- + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- + *
- + * historic credit for proving that Linux spinlocks can be implemented via
- + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
- + * and others) who prototyped it on 2.4 and did lots of comparative
- + * research and analysis; TimeSys, for proving that you can implement a
- + * fully preemptible kernel via the use of IRQ threading and mutexes;
- + * Bill Huey for persuasively arguing on lkml that the mutex model is the
- + * right one; and to MontaVista, who ported pmutexes to 2.6.
- + *
- + * This code is a from-scratch implementation and is not based on pmutexes,
- + * but the idea of converting spinlocks to mutexes is used here too.
- + *
- + * lock debugging, locking tree, deadlock detection:
- + *
- + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
- + * Released under the General Public License (GPL).
- + *
- + * Includes portions of the generic R/W semaphore implementation from:
- + *
- + * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- + * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
- + * - Derived also from comments by Linus
- + *
- + * Pending ownership of locks and ownership stealing:
- + *
- + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
- + *
- + * (also by Steven Rostedt)
- + * - Converted single pi_lock to individual task locks.
- + *
- + * By Esben Nielsen:
- + * Doing priority inheritance with help of the scheduler.
- + *
- + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- + * - major rework based on Esben Nielsen's initial patch
- + * - replaced thread_info references by task_struct refs
- + * - removed task->pending_owner dependency
- + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
- + * in the scheduler return path as discussed with Steven Rostedt
- + *
- + * Copyright (C) 2006, Kihon Technologies Inc.
- + * Steven Rostedt <rostedt@goodmis.org>
- + * - debugged and patched Thomas Gleixner's rework.
- + * - added back the cmpxchg to the rework.
- + * - turned atomic require back on for SMP.
- + */
- +
- +#include <linux/spinlock.h>
- +#include <linux/rtmutex.h>
- +#include <linux/sched.h>
- +#include <linux/delay.h>
- +#include <linux/module.h>
- +#include <linux/kallsyms.h>
- +#include <linux/syscalls.h>
- +#include <linux/interrupt.h>
- +#include <linux/plist.h>
- +#include <linux/fs.h>
- +#include <linux/futex.h>
- +#include <linux/hrtimer.h>
- +
- +#include "rtmutex_common.h"
- +
- +/*
- + * struct mutex functions
- + */
- +void __mutex_do_init(struct mutex *mutex, const char *name,
- + struct lock_class_key *key)
- +{
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + /*
- + * Make sure we are not reinitializing a held lock:
- + */
- + debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
- + lockdep_init_map(&mutex->dep_map, name, key, 0);
- +#endif
- + mutex->lock.save_state = 0;
- +}
- +EXPORT_SYMBOL(__mutex_do_init);
- +
- +void __lockfunc _mutex_lock(struct mutex *lock)
- +{
- + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- + rt_mutex_lock(&lock->lock);
- +}
- +EXPORT_SYMBOL(_mutex_lock);
- +
- +int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
- +{
- + int ret;
- +
- + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- + ret = rt_mutex_lock_interruptible(&lock->lock);
- + if (ret)
- + mutex_release(&lock->dep_map, 1, _RET_IP_);
- + return ret;
- +}
- +EXPORT_SYMBOL(_mutex_lock_interruptible);
- +
- +int __lockfunc _mutex_lock_killable(struct mutex *lock)
- +{
- + int ret;
- +
- + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- + ret = rt_mutex_lock_killable(&lock->lock);
- + if (ret)
- + mutex_release(&lock->dep_map, 1, _RET_IP_);
- + return ret;
- +}
- +EXPORT_SYMBOL(_mutex_lock_killable);
- +
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
- +{
- + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
- + rt_mutex_lock(&lock->lock);
- +}
- +EXPORT_SYMBOL(_mutex_lock_nested);
- +
- +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
- +{
- + mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
- + rt_mutex_lock(&lock->lock);
- +}
- +EXPORT_SYMBOL(_mutex_lock_nest_lock);
- +
- +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
- +{
- + int ret;
- +
- + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
- + ret = rt_mutex_lock_interruptible(&lock->lock);
- + if (ret)
- + mutex_release(&lock->dep_map, 1, _RET_IP_);
- + return ret;
- +}
- +EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
- +
- +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
- +{
- + int ret;
- +
- + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- + ret = rt_mutex_lock_killable(&lock->lock);
- + if (ret)
- + mutex_release(&lock->dep_map, 1, _RET_IP_);
- + return ret;
- +}
- +EXPORT_SYMBOL(_mutex_lock_killable_nested);
- +#endif
- +
- +int __lockfunc _mutex_trylock(struct mutex *lock)
- +{
- + int ret = rt_mutex_trylock(&lock->lock);
- +
- + if (ret)
- + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- +
- + return ret;
- +}
- +EXPORT_SYMBOL(_mutex_trylock);
- +
- +void __lockfunc _mutex_unlock(struct mutex *lock)
- +{
- + mutex_release(&lock->dep_map, 1, _RET_IP_);
- + rt_mutex_unlock(&lock->lock);
- +}
- +EXPORT_SYMBOL(_mutex_unlock);
- +
- +/*
- + * rwlock_t functions
- + */
- +int __lockfunc rt_write_trylock(rwlock_t *rwlock)
- +{
- + int ret;
- +
- + migrate_disable();
- + ret = rt_mutex_trylock(&rwlock->lock);
- + if (ret)
- + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- + else
- + migrate_enable();
- +
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_write_trylock);
- +
- +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
- +{
- + int ret;
- +
- + *flags = 0;
- + ret = rt_write_trylock(rwlock);
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_write_trylock_irqsave);
- +
- +int __lockfunc rt_read_trylock(rwlock_t *rwlock)
- +{
- + struct rt_mutex *lock = &rwlock->lock;
- + int ret = 1;
- +
- + /*
- + * recursive read locks succeed when current owns the lock,
- + * but not when read_depth == 0 which means that the lock is
- + * write locked.
- + */
- + if (rt_mutex_owner(lock) != current) {
- + migrate_disable();
- + ret = rt_mutex_trylock(lock);
- + if (ret)
- + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
- + else
- + migrate_enable();
- +
- + } else if (!rwlock->read_depth) {
- + ret = 0;
- + }
- +
- + if (ret)
- + rwlock->read_depth++;
- +
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_read_trylock);
- +
- +void __lockfunc rt_write_lock(rwlock_t *rwlock)
- +{
- + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- + __rt_spin_lock(&rwlock->lock);
- +}
- +EXPORT_SYMBOL(rt_write_lock);
- +
- +void __lockfunc rt_read_lock(rwlock_t *rwlock)
- +{
- + struct rt_mutex *lock = &rwlock->lock;
- +
- + /*
- + * recursive read locks succeed when current owns the lock
- + */
- + if (rt_mutex_owner(lock) != current) {
- + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- + __rt_spin_lock(lock);
- + }
- + rwlock->read_depth++;
- +}
- +EXPORT_SYMBOL(rt_read_lock);
- +
- +void __lockfunc rt_write_unlock(rwlock_t *rwlock)
- +{
- + /* NOTE: we always pass in '1' for nested, for simplicity */
- + rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
- + __rt_spin_unlock(&rwlock->lock);
- + migrate_enable();
- +}
- +EXPORT_SYMBOL(rt_write_unlock);
- +
- +void __lockfunc rt_read_unlock(rwlock_t *rwlock)
- +{
- + /* Release the lock only when read_depth is down to 0 */
- + if (--rwlock->read_depth == 0) {
- + rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
- + __rt_spin_unlock(&rwlock->lock);
- + migrate_enable();
- + }
- +}
- +EXPORT_SYMBOL(rt_read_unlock);
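rt_read_lock()/rt_read_unlock() make the RT rwlock's read side recursive for
one owner: only the first acquisition takes the underlying rt_mutex, and only
when read_depth falls back to zero is it dropped. A small user-space model of
that accounting (the owner pointer stands in for the rt_mutex owner):

    #include <assert.h>

    struct model_rwlock {
            void *owner;        /* NULL when unlocked */
            int read_depth;
    };

    static void model_read_lock(struct model_rwlock *rw, void *self)
    {
            if (rw->owner != self)
                    rw->owner = self;   /* stands in for __rt_spin_lock() */
            rw->read_depth++;
    }

    static void model_read_unlock(struct model_rwlock *rw)
    {
            if (--rw->read_depth == 0)
                    rw->owner = 0;      /* stands in for __rt_spin_unlock() */
    }

    int main(void)
    {
            struct model_rwlock rw = { 0, 0 };
            int me;

            model_read_lock(&rw, &me);
            model_read_lock(&rw, &me);  /* recursive: no self-deadlock */
            model_read_unlock(&rw);
            assert(rw.owner == &me);    /* still held after inner unlock */
            model_read_unlock(&rw);
            assert(rw.read_depth == 0 && rw.owner == 0);
            return 0;
    }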
- +
- +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
- +{
- + rt_write_lock(rwlock);
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(rt_write_lock_irqsave);
- +
- +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
- +{
- + rt_read_lock(rwlock);
- +
- + return 0;
- +}
- +EXPORT_SYMBOL(rt_read_lock_irqsave);
- +
- +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
- +{
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + /*
- + * Make sure we are not reinitializing a held lock:
- + */
- + debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
- + lockdep_init_map(&rwlock->dep_map, name, key, 0);
- +#endif
- + rwlock->lock.save_state = 1;
- + rwlock->read_depth = 0;
- +}
- +EXPORT_SYMBOL(__rt_rwlock_init);
- +
- +/**
- + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
- + * @cnt: the atomic which we are to dec
- + * @lock: the mutex to return holding if we dec to 0
- + *
- + * return true and hold lock if we dec to 0, return false otherwise
- + */
- +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
- +{
- + /* dec if we can't possibly hit 0 */
- + if (atomic_add_unless(cnt, -1, 1))
- + return 0;
- + /* we might hit 0, so take the lock */
- + mutex_lock(lock);
- + if (!atomic_dec_and_test(cnt)) {
- + /* when we actually did the dec, we didn't hit 0 */
- + mutex_unlock(lock);
- + return 0;
- + }
- + /* we hit 0, and we hold the lock */
- + return 1;
- +}
- +EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
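atomic_dec_and_mutex_lock() supports the usual "take the lock only on the
final put" refcounting idiom. A kernel-style sketch of a typical caller
follows; struct my_obj, cleanup() and the field names are hypothetical, not
taken from this patch:

    static void put_object(struct my_obj *obj)
    {
            /* Fast path: not the last reference, no lock taken. */
            if (!atomic_dec_and_mutex_lock(&obj->refs, &obj->lock))
                    return;

            /* Last reference: we now hold obj->lock. */
            cleanup(obj);
            mutex_unlock(&obj->lock);
            kfree(obj);
    }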
- diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
- index 62b6cee8ea7f..0613c4b1d059 100644
- --- a/kernel/locking/rtmutex-debug.c
- +++ b/kernel/locking/rtmutex-debug.c
- @@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
- lock->name = name;
- }
-
- -void
- -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
- -{
- -}
- -
- -void rt_mutex_deadlock_account_unlock(struct task_struct *task)
- -{
- -}
- -
- diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
- index d0519c3432b6..b585af9a1b50 100644
- --- a/kernel/locking/rtmutex-debug.h
- +++ b/kernel/locking/rtmutex-debug.h
- @@ -9,9 +9,6 @@
- * This file contains macros used solely by rtmutex.c. Debug version.
- */
-
- -extern void
- -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
- -extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
- extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
- extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
- extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
- diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
- index 2c49d76f96c3..3a8b5d44aaf8 100644
- --- a/kernel/locking/rtmutex.c
- +++ b/kernel/locking/rtmutex.c
- @@ -7,6 +7,11 @@
- * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
- * Copyright (C) 2006 Esben Nielsen
- + * Adaptive Spinlocks:
- + * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
- + * and Peter Morreale,
- + * Adaptive Spinlocks simplification:
- + * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
- *
- * See Documentation/locking/rt-mutex-design.txt for details.
- */
- @@ -16,6 +21,8 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
- #include <linux/timer.h>
- +#include <linux/ww_mutex.h>
- +#include <linux/blkdev.h>
-
- #include "rtmutex_common.h"
-
- @@ -133,6 +140,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
- WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
- }
-
- +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
- +{
- + return waiter && waiter != PI_WAKEUP_INPROGRESS &&
- + waiter != PI_REQUEUE_INPROGRESS;
- +}
- +
- /*
- * We can speed up the acquire/release, if there's no debugging state to be
- * set up.
- @@ -222,6 +235,12 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
- }
- #endif
-
- +/*
- + * Only use with rt_mutex_waiter_{less,equal}()
- + */
- +#define task_to_waiter(p) &(struct rt_mutex_waiter) \
- + { .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
- +
- static inline int
- rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- struct rt_mutex_waiter *right)
- @@ -236,12 +255,51 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
- * then right waiter has a dl_prio() too.
- */
- if (dl_prio(left->prio))
- - return dl_time_before(left->task->dl.deadline,
- - right->task->dl.deadline);
- + return dl_time_before(left->deadline, right->deadline);
-
- return 0;
- }
-
- +static inline int
- +rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
- + struct rt_mutex_waiter *right)
- +{
- + if (left->prio != right->prio)
- + return 0;
- +
- + /*
- + * If both waiters have dl_prio(), we check the deadlines of the
- + * associated tasks.
- + * If left waiter has a dl_prio(), and we didn't return 0 above,
- + * then right waiter has a dl_prio() too.
- + */
- + if (dl_prio(left->prio))
- + return left->deadline == right->deadline;
- +
- + return 1;
- +}
- +
- +#define STEAL_NORMAL 0
- +#define STEAL_LATERAL 1
- +
- +static inline int
- +rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
- +{
- + struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
- +
- + if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
- + return 1;
- +
- + /*
- + * Note that RT tasks are excluded from lateral-steals
- + * to prevent the introduction of an unbounded latency.
- + */
- + if (mode == STEAL_NORMAL || rt_task(waiter->task))
- + return 0;
- +
- + return rt_mutex_waiter_equal(waiter, top_waiter);
- +}
- +
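rt_mutex_steal() adds the STEAL_LATERAL policy used later by the RT spinlock
slow path: a waiter may take the lock ahead of the current top waiter if it
has strictly higher priority or, for non-RT tasks only, equal priority; RT
tasks never steal laterally, so no unbounded latency is introduced. Below is
a user-space model of the policy (it leaves out the waiter == top_waiter
identity case the kernel also accepts):

    #include <stdbool.h>
    #include <stdio.h>

    #define STEAL_NORMAL  0
    #define STEAL_LATERAL 1

    /* Lower prio value means higher priority, as in the kernel. */
    struct waiter { int prio; bool rt_task; };

    static bool may_steal(const struct waiter *w, const struct waiter *top,
                          int mode)
    {
            if (w->prio < top->prio)
                    return true;              /* strictly higher priority */
            if (mode == STEAL_NORMAL || w->rt_task)
                    return false;             /* no lateral steal for RT tasks */
            return w->prio == top->prio;      /* lateral steal among peers */
    }

    int main(void)
    {
            struct waiter top  = { .prio = 120, .rt_task = false };
            struct waiter peer = { .prio = 120, .rt_task = false };

            printf("%d\n", may_steal(&peer, &top, STEAL_LATERAL)); /* 1 */
            printf("%d\n", may_steal(&peer, &top, STEAL_NORMAL));  /* 0 */
            return 0;
    }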
- static void
- rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
- {
- @@ -320,72 +378,16 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
- RB_CLEAR_NODE(&waiter->pi_tree_entry);
- }
-
- -/*
- - * Calculate task priority from the waiter tree priority
- - *
- - * Return task->normal_prio when the waiter tree is empty or when
- - * the waiter is not allowed to do priority boosting
- - */
- -int rt_mutex_getprio(struct task_struct *task)
- -{
- - if (likely(!task_has_pi_waiters(task)))
- - return task->normal_prio;
- -
- - return min(task_top_pi_waiter(task)->prio,
- - task->normal_prio);
- -}
- -
- -struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
- -{
- - if (likely(!task_has_pi_waiters(task)))
- - return NULL;
- -
- - return task_top_pi_waiter(task)->task;
- -}
- -
- -/*
- - * Called by sched_setscheduler() to get the priority which will be
- - * effective after the change.
- - */
- -int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
- -{
- - if (!task_has_pi_waiters(task))
- - return newprio;
- -
- - if (task_top_pi_waiter(task)->task->prio <= newprio)
- - return task_top_pi_waiter(task)->task->prio;
- - return newprio;
- -}
- -
- -/*
- - * Adjust the priority of a task, after its pi_waiters got modified.
- - *
- - * This can be both boosting and unboosting. task->pi_lock must be held.
- - */
- -static void __rt_mutex_adjust_prio(struct task_struct *task)
- +static void rt_mutex_adjust_prio(struct task_struct *p)
- {
- - int prio = rt_mutex_getprio(task);
- + struct task_struct *pi_task = NULL;
-
- - if (task->prio != prio || dl_prio(prio))
- - rt_mutex_setprio(task, prio);
- -}
- + lockdep_assert_held(&p->pi_lock);
-
- -/*
- - * Adjust task priority (undo boosting). Called from the exit path of
- - * rt_mutex_slowunlock() and rt_mutex_slowlock().
- - *
- - * (Note: We do this outside of the protection of lock->wait_lock to
- - * allow the lock to be taken while or before we readjust the priority
- - * of task. We do not use the spin_xx_mutex() variants here as we are
- - * outside of the debug path.)
- - */
- -void rt_mutex_adjust_prio(struct task_struct *task)
- -{
- - unsigned long flags;
- + if (task_has_pi_waiters(p))
- + pi_task = task_top_pi_waiter(p)->task;
-
- - raw_spin_lock_irqsave(&task->pi_lock, flags);
- - __rt_mutex_adjust_prio(task);
- - raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- + rt_mutex_setprio(p, pi_task);
- }
-
- /*
- @@ -414,6 +416,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
- return debug_rt_mutex_detect_deadlock(waiter, chwalk);
- }
-
- +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
- +{
- + if (waiter->savestate)
- + wake_up_lock_sleeper(waiter->task);
- + else
- + wake_up_process(waiter->task);
- +}
- +
- /*
- * Max number of times we'll walk the boosting chain:
- */
- @@ -421,7 +431,8 @@ int max_lock_depth = 1024;
-
- static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
- {
- - return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
- + return rt_mutex_real_waiter(p->pi_blocked_on) ?
- + p->pi_blocked_on->lock : NULL;
- }
-
- /*
- @@ -557,7 +568,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- * reached or the state of the chain has changed while we
- * dropped the locks.
- */
- - if (!waiter)
- + if (!rt_mutex_real_waiter(waiter))
- goto out_unlock_pi;
-
- /*
- @@ -608,7 +619,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- * enabled we continue, but stop the requeueing in the chain
- * walk.
- */
- - if (waiter->prio == task->prio) {
- + if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
- if (!detect_deadlock)
- goto out_unlock_pi;
- else
- @@ -704,7 +715,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-
- /* [7] Requeue the waiter in the lock waiter tree. */
- rt_mutex_dequeue(lock, waiter);
- +
- + /*
- + * Update the waiter prio fields now that we're dequeued.
- + *
- + * These values can have changed through either:
- + *
- + * sys_sched_set_scheduler() / sys_sched_setattr()
- + *
- + * or
- + *
- + * DL CBS enforcement advancing the effective deadline.
- + *
- + * Even though pi_waiters also uses these fields, and that tree is only
- + * updated in [11], we can do this here, since we hold [L], which
- + * serializes all pi_waiters access and rb_erase() does not care about
- + * the values of the node being removed.
- + */
- waiter->prio = task->prio;
- + waiter->deadline = task->dl.deadline;
- +
- rt_mutex_enqueue(lock, waiter);
-
- /* [8] Release the task */
- @@ -719,13 +749,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- * follow here. This is the end of the chain we are walking.
- */
- if (!rt_mutex_owner(lock)) {
- + struct rt_mutex_waiter *lock_top_waiter;
- +
- /*
- * If the requeue [7] above changed the top waiter,
- * then we need to wake the new top waiter up to try
- * to get the lock.
- */
- - if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
- - wake_up_process(rt_mutex_top_waiter(lock)->task);
- + lock_top_waiter = rt_mutex_top_waiter(lock);
- + if (prerequeue_top_waiter != lock_top_waiter)
- + rt_mutex_wake_waiter(lock_top_waiter);
- raw_spin_unlock_irq(&lock->wait_lock);
- return 0;
- }
- @@ -745,7 +778,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- */
- rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
- rt_mutex_enqueue_pi(task, waiter);
- - __rt_mutex_adjust_prio(task);
- + rt_mutex_adjust_prio(task);
-
- } else if (prerequeue_top_waiter == waiter) {
- /*
- @@ -761,7 +794,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- rt_mutex_dequeue_pi(task, waiter);
- waiter = rt_mutex_top_waiter(lock);
- rt_mutex_enqueue_pi(task, waiter);
- - __rt_mutex_adjust_prio(task);
- + rt_mutex_adjust_prio(task);
- } else {
- /*
- * Nothing changed. No need to do any priority
- @@ -818,6 +851,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- return ret;
- }
-
- +
- /*
- * Try to take an rt-mutex
- *
- @@ -827,10 +861,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
- * @task: The task which wants to acquire the lock
- * @waiter: The waiter that is queued to the lock's wait tree if the
- * callsite called task_blocked_on_lock(), otherwise NULL
- + * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
- */
- -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- - struct rt_mutex_waiter *waiter)
- +static int __try_to_take_rt_mutex(struct rt_mutex *lock,
- + struct task_struct *task,
- + struct rt_mutex_waiter *waiter, int mode)
- {
- + lockdep_assert_held(&lock->wait_lock);
- +
- /*
- * Before testing whether we can acquire @lock, we set the
- * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
- @@ -863,12 +901,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- */
- if (waiter) {
- /*
- - * If waiter is not the highest priority waiter of
- - * @lock, give up.
- + * If waiter is not the highest priority waiter of @lock,
- + * or its peer when lateral steal is allowed, give up.
- */
- - if (waiter != rt_mutex_top_waiter(lock))
- + if (!rt_mutex_steal(lock, waiter, mode))
- return 0;
- -
- /*
- * We can acquire the lock. Remove the waiter from the
- * lock waiters tree.
- @@ -886,13 +923,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- */
- if (rt_mutex_has_waiters(lock)) {
- /*
- - * If @task->prio is greater than or equal to
- - * the top waiter priority (kernel view),
- - * @task lost.
- + * If @task->prio is greater than the top waiter
- + * priority (kernel view), or equal to it when a
- + * lateral steal is forbidden, @task lost.
- */
- - if (task->prio >= rt_mutex_top_waiter(lock)->prio)
- + if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
- return 0;
- -
- /*
- * The current top waiter stays enqueued. We
- * don't have to change anything in the lock
- @@ -936,177 +972,589 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- */
- rt_mutex_set_owner(lock, task);
-
- - rt_mutex_deadlock_account_lock(lock, task);
- -
- return 1;
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- /*
- - * Task blocks on lock.
- - *
- - * Prepare waiter and propagate pi chain
- - *
- - * This must be called with lock->wait_lock held and interrupts disabled
- + * preemptible spin_lock functions:
- */
- -static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- - struct rt_mutex_waiter *waiter,
- - struct task_struct *task,
- - enum rtmutex_chainwalk chwalk)
- +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
- + void (*slowfn)(struct rt_mutex *lock,
- + bool mg_off),
- + bool do_mig_dis)
- {
- - struct task_struct *owner = rt_mutex_owner(lock);
- - struct rt_mutex_waiter *top_waiter = waiter;
- - struct rt_mutex *next_lock;
- - int chain_walk = 0, res;
- + might_sleep_no_state_check();
-
- - /*
- - * Early deadlock detection. We really don't want the task to
- - * enqueue on itself just to untangle the mess later. It's not
- - * only an optimization. We drop the locks, so another waiter
- - * can come in before the chain walk detects the deadlock. So
- - * the other will detect the deadlock and return -EDEADLOCK,
- - * which is wrong, as the other waiter is not in a deadlock
- - * situation.
- - */
- - if (owner == task)
- - return -EDEADLK;
- + if (do_mig_dis)
- + migrate_disable();
-
- - raw_spin_lock(&task->pi_lock);
- - __rt_mutex_adjust_prio(task);
- - waiter->task = task;
- - waiter->lock = lock;
- - waiter->prio = task->prio;
- + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- + return;
- + else
- + slowfn(lock, do_mig_dis);
- +}
-
- - /* Get the top priority waiter on the lock */
- - if (rt_mutex_has_waiters(lock))
- - top_waiter = rt_mutex_top_waiter(lock);
- - rt_mutex_enqueue(lock, waiter);
- +static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
- + void (*slowfn)(struct rt_mutex *lock))
- +{
- + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
- + return;
- + else
- + slowfn(lock);
- +}
- +#ifdef CONFIG_SMP
- +/*
- + * Note that owner is a speculative pointer and dereferencing relies
- + * on rcu_read_lock() and the check against the lock owner.
- + */
- +static int adaptive_wait(struct rt_mutex *lock,
- + struct task_struct *owner)
- +{
- + int res = 0;
-
- - task->pi_blocked_on = waiter;
- + rcu_read_lock();
- + for (;;) {
- + if (owner != rt_mutex_owner(lock))
- + break;
- + /*
- + * Ensure that owner->on_cpu is dereferenced _after_
- + * checking the above to be valid.
- + */
- + barrier();
- + if (!owner->on_cpu) {
- + res = 1;
- + break;
- + }
- + cpu_relax();
- + }
- + rcu_read_unlock();
- + return res;
- +}
- +#else
- +static int adaptive_wait(struct rt_mutex *lock,
- + struct task_struct *orig_owner)
- +{
- + return 1;
- +}
- +#endif
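adaptive_wait() gives RT spinlocks adaptive spinning: keep polling only while
the very same owner is still running on another CPU, and block as soon as the
owner is scheduled out or the lock changes hands. A user-space model of that
decision using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct owner { atomic_bool on_cpu; };

    /* Returns true when the waiter should block instead of spinning. */
    static bool should_block(struct owner *_Atomic *lock_owner,
                             struct owner *seen)
    {
            for (;;) {
                    if (atomic_load(lock_owner) != seen)
                            return false;  /* owner changed: retry the trylock */
                    if (!atomic_load(&seen->on_cpu))
                            return true;   /* owner off CPU: spinning is futile */
                    /* cpu_relax() would go here */
            }
    }

    int main(void)
    {
            struct owner o = { .on_cpu = false };
            struct owner *_Atomic cur = &o;

            return should_block(&cur, &o) ? 0 : 1;  /* 0: waiter must block */
    }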
-
- - raw_spin_unlock(&task->pi_lock);
- +static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter,
- + struct task_struct *task,
- + enum rtmutex_chainwalk chwalk);
- +/*
- + * Slow path lock function spin_lock style: this variant is very
- + * careful not to miss any non-lock wakeups.
- + *
- + * We store the current state under p->pi_lock in p->saved_state and
- + * the try_to_wake_up() code handles this accordingly.
- + */
- +static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
- + bool mg_off)
- +{
- + struct task_struct *lock_owner, *self = current;
- + struct rt_mutex_waiter waiter, *top_waiter;
- + unsigned long flags;
- + int ret;
-
- - if (!owner)
- - return 0;
- + rt_mutex_init_waiter(&waiter, true);
-
- - raw_spin_lock(&owner->pi_lock);
- - if (waiter == rt_mutex_top_waiter(lock)) {
- - rt_mutex_dequeue_pi(owner, top_waiter);
- - rt_mutex_enqueue_pi(owner, waiter);
- + raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- - __rt_mutex_adjust_prio(owner);
- - if (owner->pi_blocked_on)
- - chain_walk = 1;
- - } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
- - chain_walk = 1;
- + if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
- + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- + return;
- }
-
- - /* Store the lock on which owner is blocked or NULL */
- - next_lock = task_blocked_on_lock(owner);
- + BUG_ON(rt_mutex_owner(lock) == self);
-
- - raw_spin_unlock(&owner->pi_lock);
- /*
- - * Even if full deadlock detection is on, if the owner is not
- - * blocked itself, we can avoid finding this out in the chain
- - * walk.
- + * We save whatever state the task is in and we'll restore it
- + * after acquiring the lock taking real wakeups into account
- + * as well. We are serialized via pi_lock against wakeups. See
- + * try_to_wake_up().
- */
- - if (!chain_walk || !next_lock)
- - return 0;
- + raw_spin_lock(&self->pi_lock);
- + self->saved_state = self->state;
- + __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
- + raw_spin_unlock(&self->pi_lock);
-
- - /*
- - * The owner can't disappear while holding a lock,
- - * so the owner struct is protected by wait_lock.
- - * Gets dropped in rt_mutex_adjust_prio_chain()!
- - */
- - get_task_struct(owner);
- + ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
- + BUG_ON(ret);
-
- - raw_spin_unlock_irq(&lock->wait_lock);
- + for (;;) {
- + /* Try to acquire the lock again. */
- + if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
- + break;
-
- - res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
- - next_lock, waiter, task);
- + top_waiter = rt_mutex_top_waiter(lock);
- + lock_owner = rt_mutex_owner(lock);
-
- - raw_spin_lock_irq(&lock->wait_lock);
- + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- - return res;
- -}
- + debug_rt_mutex_print_deadlock(&waiter);
-
- -/*
- - * Remove the top waiter from the current tasks pi waiter tree and
- - * queue it up.
- - *
- - * Called with lock->wait_lock held and interrupts disabled.
- - */
- -static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
- - struct rt_mutex *lock)
- -{
- - struct rt_mutex_waiter *waiter;
- + if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
- + if (mg_off)
- + migrate_enable();
- + schedule();
- + if (mg_off)
- + migrate_disable();
- + }
-
- - raw_spin_lock(¤t->pi_lock);
- + raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- - waiter = rt_mutex_top_waiter(lock);
- + raw_spin_lock(&self->pi_lock);
- + __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
- + raw_spin_unlock(&self->pi_lock);
- + }
-
- /*
- - * Remove it from current->pi_waiters. We do not adjust a
- - * possible priority boost right now. We execute wakeup in the
- - * boosted mode and go back to normal after releasing
- - * lock->wait_lock.
- + * Restore the task state to current->saved_state. We set it
- + * to the original state above and the try_to_wake_up() code
- + * has possibly updated it when a real (non-rtmutex) wakeup
- + * happened while we were blocked. Clear saved_state so
- + * try_to_wakeup() does not get confused.
- */
- - rt_mutex_dequeue_pi(current, waiter);
- + raw_spin_lock(&self->pi_lock);
- + __set_current_state_no_track(self->saved_state);
- + self->saved_state = TASK_RUNNING;
- + raw_spin_unlock(&self->pi_lock);
-
- /*
- - * As we are waking up the top waiter, and the waiter stays
- - * queued on the lock until it gets the lock, this lock
- - * obviously has waiters. Just set the bit here and this has
- - * the added benefit of forcing all new tasks into the
- - * slow path making sure no task of lower priority than
- - * the top waiter can steal this lock.
- + * try_to_take_rt_mutex() sets the waiter bit
- + * unconditionally. We might have to fix that up:
- */
- - lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
- + fixup_rt_mutex_waiters(lock);
-
- - raw_spin_unlock(¤t->pi_lock);
- + BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
- + BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
-
- - wake_q_add(wake_q, waiter->task);
- + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- +
- + debug_rt_mutex_free_waiter(&waiter);
- }
-
- +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
- + struct wake_q_head *wake_q,
- + struct wake_q_head *wq_sleeper);
- /*
- - * Remove a waiter from a lock and give up
- - *
- - * Must be called with lock->wait_lock held and interrupts disabled. I must
- - * have just failed to try_to_take_rt_mutex().
- + * Slow path to release a rt_mutex spin_lock style
- */
- -static void remove_waiter(struct rt_mutex *lock,
- - struct rt_mutex_waiter *waiter)
- +static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
- {
- - bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
- - struct task_struct *owner = rt_mutex_owner(lock);
- - struct rt_mutex *next_lock;
- + unsigned long flags;
- + WAKE_Q(wake_q);
- + WAKE_Q(wake_sleeper_q);
- + bool postunlock;
-
- - raw_spin_lock(¤t->pi_lock);
- - rt_mutex_dequeue(lock, waiter);
- - current->pi_blocked_on = NULL;
- - raw_spin_unlock(¤t->pi_lock);
- + raw_spin_lock_irqsave(&lock->wait_lock, flags);
- + postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
- + raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- - /*
- - * Only update priority if the waiter was the highest priority
- - * waiter of the lock and there is an owner to update.
- - */
- - if (!owner || !is_top_waiter)
- - return;
- + if (postunlock)
- + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
- +}
-
- - raw_spin_lock(&owner->pi_lock);
- +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
- +{
- + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
- + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- +}
- +EXPORT_SYMBOL(rt_spin_lock__no_mg);
-
- - rt_mutex_dequeue_pi(owner, waiter);
- +void __lockfunc rt_spin_lock(spinlock_t *lock)
- +{
- + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
- + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- +}
- +EXPORT_SYMBOL(rt_spin_lock);
-
- - if (rt_mutex_has_waiters(lock))
- - rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
- +void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
- +{
- + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
- +}
- +EXPORT_SYMBOL(__rt_spin_lock);
-
- - __rt_mutex_adjust_prio(owner);
- +void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
- +{
- + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
- +}
- +EXPORT_SYMBOL(__rt_spin_lock__no_mg);
-
- - /* Store the lock on which owner is blocked or NULL */
- - next_lock = task_blocked_on_lock(owner);
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
- +{
- + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
- +}
- +EXPORT_SYMBOL(rt_spin_lock_nested);
- +#endif
-
- - raw_spin_unlock(&owner->pi_lock);
- +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
- +{
- + /* NOTE: we always pass in '1' for nested, for simplicity */
- + spin_release(&lock->dep_map, 1, _RET_IP_);
- + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
- +}
- +EXPORT_SYMBOL(rt_spin_unlock__no_mg);
-
- - /*
- +void __lockfunc rt_spin_unlock(spinlock_t *lock)
- +{
- + /* NOTE: we always pass in '1' for nested, for simplicity */
- + spin_release(&lock->dep_map, 1, _RET_IP_);
- + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
- + migrate_enable();
- +}
- +EXPORT_SYMBOL(rt_spin_unlock);
- +
- +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
- +{
- + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
- +}
- +EXPORT_SYMBOL(__rt_spin_unlock);
- +
- +/*
- + * Wait for the lock to get unlocked: instead of polling for an unlock
- + * (like raw spinlocks do), we lock and unlock, to force the kernel to
- + * schedule if there's contention:
- + */
- +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
- +{
- + spin_lock(lock);
- + spin_unlock(lock);
- +}
- +EXPORT_SYMBOL(rt_spin_unlock_wait);
- +
- +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
- +{
- + int ret;
- +
- + ret = rt_mutex_trylock(&lock->lock);
- + if (ret)
- + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_spin_trylock__no_mg);
- +
- +int __lockfunc rt_spin_trylock(spinlock_t *lock)
- +{
- + int ret;
- +
- + migrate_disable();
- + ret = rt_mutex_trylock(&lock->lock);
- + if (ret)
- + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- + else
- + migrate_enable();
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_spin_trylock);
- +
- +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
- +{
- + int ret;
- +
- + local_bh_disable();
- + ret = rt_mutex_trylock(&lock->lock);
- + if (ret) {
- + migrate_disable();
- + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- + } else
- + local_bh_enable();
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_spin_trylock_bh);
- +
- +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
- +{
- + int ret;
- +
- + *flags = 0;
- + ret = rt_mutex_trylock(&lock->lock);
- + if (ret) {
- + migrate_disable();
- + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- + }
- + return ret;
- +}
- +EXPORT_SYMBOL(rt_spin_trylock_irqsave);
- +
- +int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
- +{
- + /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
- + if (atomic_add_unless(atomic, -1, 1))
- + return 0;
- + rt_spin_lock(lock);
- + if (atomic_dec_and_test(atomic))
- + return 1;
- + rt_spin_unlock(lock);
- + return 0;
- +}
- +EXPORT_SYMBOL(atomic_dec_and_spin_lock);
- +
- +void
- +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
- +{
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + /*
- + * Make sure we are not reinitializing a held lock:
- + */
- + debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- + lockdep_init_map(&lock->dep_map, name, key, 0);
- +#endif
- +}
- +EXPORT_SYMBOL(__rt_spin_lock_init);
- +
- +#endif /* PREEMPT_RT_FULL */
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static inline int __sched
- +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
- +{
- + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
- + struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
- +
- + if (!hold_ctx)
- + return 0;
- +
- + if (unlikely(ctx == hold_ctx))
- + return -EALREADY;
- +
- + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
- + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
- +#ifdef CONFIG_DEBUG_MUTEXES
- + DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
- + ctx->contending_lock = ww;
- +#endif
- + return -EDEADLK;
- + }
- +
- + return 0;
- +}
- +#else
- +static inline int __sched
- +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
- +{
- + BUG();
- + return 0;
- +}
- +
- +#endif
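__mutex_lock_check_stamp() is the ww_mutex wait/wound test wired into the RT
slow path: the older context (smaller stamp) keeps the lock, a younger
contender gets -EDEADLK and has to back off, and contending against our own
context yields -EALREADY. A user-space model; the unsigned subtraction makes
the age test wrap-safe, and the pointer-comparison tie-break mirrors the
kernel but is only meaningful there:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    struct ctx { unsigned long stamp; };

    static int check_stamp(const struct ctx *me, const struct ctx *holder)
    {
            if (!holder)
                    return 0;
            if (me == holder)
                    return -EALREADY;
            /* wrap-safe "me is younger (or tied and loses)" test */
            if (me->stamp - holder->stamp <= LONG_MAX &&
                (me->stamp != holder->stamp || me > holder))
                    return -EDEADLK;
            return 0;
    }

    int main(void)
    {
            struct ctx old = { .stamp = 1 }, young = { .stamp = 2 };

            printf("%d\n", check_stamp(&young, &old)); /* -EDEADLK: back off */
            printf("%d\n", check_stamp(&old, &young)); /* 0: keep waiting */
            return 0;
    }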
- +
- +static inline int
- +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- + struct rt_mutex_waiter *waiter)
- +{
- + return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
- +}
- +
- +/*
- + * Task blocks on lock.
- + *
- + * Prepare waiter and propagate pi chain
- + *
- + * This must be called with lock->wait_lock held and interrupts disabled
- + */
- +static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter,
- + struct task_struct *task,
- + enum rtmutex_chainwalk chwalk)
- +{
- + struct task_struct *owner = rt_mutex_owner(lock);
- + struct rt_mutex_waiter *top_waiter = waiter;
- + struct rt_mutex *next_lock;
- + int chain_walk = 0, res;
- +
- + lockdep_assert_held(&lock->wait_lock);
- +
- + /*
- + * Early deadlock detection. We really don't want the task to
- + * enqueue on itself just to untangle the mess later. It's not
- + * only an optimization. We drop the locks, so another waiter
- + * can come in before the chain walk detects the deadlock. So
- + * the other will detect the deadlock and return -EDEADLOCK,
- + * which is wrong, as the other waiter is not in a deadlock
- + * situation.
- + */
- + if (owner == task)
- + return -EDEADLK;
- +
- + raw_spin_lock(&task->pi_lock);
- +
- + /*
- + * In the case of futex requeue PI, this will be a proxy
- + * lock. The task will wake unaware that it is enqueued on
- + * this lock. Avoid blocking on two locks and corrupting
- + * pi_blocked_on via the PI_WAKEUP_INPROGRESS
- + * flag. futex_wait_requeue_pi() sets this when it wakes up
- + * before requeue (due to a signal or timeout). Do not enqueue
- + * the task if PI_WAKEUP_INPROGRESS is set.
- + */
- + if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
- + raw_spin_unlock(&task->pi_lock);
- + return -EAGAIN;
- + }
- +
- + BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
- +
- + rt_mutex_adjust_prio(task);
- + waiter->task = task;
- + waiter->lock = lock;
- + waiter->prio = task->prio;
- + waiter->deadline = task->dl.deadline;
- +
- + /* Get the top priority waiter on the lock */
- + if (rt_mutex_has_waiters(lock))
- + top_waiter = rt_mutex_top_waiter(lock);
- + rt_mutex_enqueue(lock, waiter);
- +
- + task->pi_blocked_on = waiter;
- +
- + raw_spin_unlock(&task->pi_lock);
- +
- + if (!owner)
- + return 0;
- +
- + raw_spin_lock(&owner->pi_lock);
- + if (waiter == rt_mutex_top_waiter(lock)) {
- + rt_mutex_dequeue_pi(owner, top_waiter);
- + rt_mutex_enqueue_pi(owner, waiter);
- +
- + rt_mutex_adjust_prio(owner);
- + if (rt_mutex_real_waiter(owner->pi_blocked_on))
- + chain_walk = 1;
- + } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
- + chain_walk = 1;
- + }
- +
- + /* Store the lock on which owner is blocked or NULL */
- + next_lock = task_blocked_on_lock(owner);
- +
- + raw_spin_unlock(&owner->pi_lock);
- + /*
- + * Even if full deadlock detection is on, if the owner is not
- + * blocked itself, we can avoid finding this out in the chain
- + * walk.
- + */
- + if (!chain_walk || !next_lock)
- + return 0;
- +
- + /*
- + * The owner can't disappear while holding a lock,
- + * so the owner struct is protected by wait_lock.
- + * Gets dropped in rt_mutex_adjust_prio_chain()!
- + */
- + get_task_struct(owner);
- +
- + raw_spin_unlock_irq(&lock->wait_lock);
- +
- + res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
- + next_lock, waiter, task);
- +
- + raw_spin_lock_irq(&lock->wait_lock);
- +
- + return res;
- +}
- +
- +/*
- + * Remove the top waiter from the current task's pi waiter tree and
- + * queue it up.
- + *
- + * Called with lock->wait_lock held and interrupts disabled.
- + */
- +static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
- + struct wake_q_head *wake_sleeper_q,
- + struct rt_mutex *lock)
- +{
- + struct rt_mutex_waiter *waiter;
- +
- + raw_spin_lock(¤t->pi_lock);
- +
- + waiter = rt_mutex_top_waiter(lock);
- +
- + /*
- + * Remove it from current->pi_waiters and deboost.
- + *
- + * We must in fact deboost here in order to ensure we call
- + * rt_mutex_setprio() to update p->pi_top_task before the
- + * task unblocks.
- + */
- + rt_mutex_dequeue_pi(current, waiter);
- + rt_mutex_adjust_prio(current);
- +
- + /*
- + * As we are waking up the top waiter, and the waiter stays
- + * queued on the lock until it gets the lock, this lock
- + * obviously has waiters. Just set the bit here and this has
- + * the added benefit of forcing all new tasks into the
- + * slow path making sure no task of lower priority than
- + * the top waiter can steal this lock.
- + */
- + lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
- +
- + /*
- + * We deboosted before waking the top waiter task such that we don't
- + * run two tasks with the 'same' priority (and ensure the
- + * p->pi_top_task pointer points to a blocked task). This however can
- + * lead to priority inversion if we would get preempted after the
- + * deboost but before waking our donor task, hence the preempt_disable()
- + * before unlock.
- + *
- + * Pairs with preempt_enable() in rt_mutex_postunlock();
- + */
- + preempt_disable();
- + if (waiter->savestate)
- + wake_q_add_sleeper(wake_sleeper_q, waiter->task);
- + else
- + wake_q_add(wake_q, waiter->task);
- + raw_spin_unlock(¤t->pi_lock);
- +}
- +
- +/*
- + * Remove a waiter from a lock and give up
- + *
- + * Must be called with lock->wait_lock held and interrupts disabled. It must
- + * have just failed to try_to_take_rt_mutex().
- + */
- +static void remove_waiter(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter)
- +{
- + bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
- + struct task_struct *owner = rt_mutex_owner(lock);
- + struct rt_mutex *next_lock = NULL;
- +
- + lockdep_assert_held(&lock->wait_lock);
- +
- + raw_spin_lock(¤t->pi_lock);
- + rt_mutex_dequeue(lock, waiter);
- + current->pi_blocked_on = NULL;
- + raw_spin_unlock(¤t->pi_lock);
- +
- + /*
- + * Only update priority if the waiter was the highest priority
- + * waiter of the lock and there is an owner to update.
- + */
- + if (!owner || !is_top_waiter)
- + return;
- +
- + raw_spin_lock(&owner->pi_lock);
- +
- + rt_mutex_dequeue_pi(owner, waiter);
- +
- + if (rt_mutex_has_waiters(lock))
- + rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
- +
- + rt_mutex_adjust_prio(owner);
- +
- + /* Store the lock on which owner is blocked or NULL */
- + if (rt_mutex_real_waiter(owner->pi_blocked_on))
- + next_lock = task_blocked_on_lock(owner);
- +
- + raw_spin_unlock(&owner->pi_lock);
- +
- + /*
- * Don't walk the chain, if the owner task is not blocked
- * itself.
- */
- @@ -1138,21 +1586,30 @@ void rt_mutex_adjust_pi(struct task_struct *task)
- raw_spin_lock_irqsave(&task->pi_lock, flags);
-
- waiter = task->pi_blocked_on;
- - if (!waiter || (waiter->prio == task->prio &&
- - !dl_prio(task->prio))) {
- + if (!rt_mutex_real_waiter(waiter) ||
- + rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- return;
- }
- next_lock = waiter->lock;
- - raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
- /* gets dropped in rt_mutex_adjust_prio_chain()! */
- get_task_struct(task);
-
- + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
- next_lock, NULL, task);
- }
-
- +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
- +{
- + debug_rt_mutex_init_waiter(waiter);
- + RB_CLEAR_NODE(&waiter->pi_tree_entry);
- + RB_CLEAR_NODE(&waiter->tree_entry);
- + waiter->task = NULL;
- + waiter->savestate = savestate;
- +}
- +
- /**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
- * @lock: the rt_mutex to take
- @@ -1166,7 +1623,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
- static int __sched
- __rt_mutex_slowlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- - struct rt_mutex_waiter *waiter)
- + struct rt_mutex_waiter *waiter,
- + struct ww_acquire_ctx *ww_ctx)
- {
- int ret = 0;
-
- @@ -1175,16 +1633,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
- if (try_to_take_rt_mutex(lock, current, waiter))
- break;
-
- - /*
- - * TASK_INTERRUPTIBLE checks for signals and
- - * timeout. Ignored otherwise.
- - */
- - if (unlikely(state == TASK_INTERRUPTIBLE)) {
- - /* Signal pending? */
- - if (signal_pending(current))
- - ret = -EINTR;
- - if (timeout && !timeout->task)
- - ret = -ETIMEDOUT;
- + if (timeout && !timeout->task) {
- + ret = -ETIMEDOUT;
- + break;
- + }
- + if (signal_pending_state(state, current)) {
- + ret = -EINTR;
- + break;
- + }
- +
- + if (ww_ctx && ww_ctx->acquired > 0) {
- + ret = __mutex_lock_check_stamp(lock, ww_ctx);
- if (ret)
- break;
- }
- @@ -1223,35 +1682,94 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
- }
- }
-
- -/*
- - * Slow path lock function:
- - */
- -static int __sched
- -rt_mutex_slowlock(struct rt_mutex *lock, int state,
- - struct hrtimer_sleeper *timeout,
- - enum rtmutex_chainwalk chwalk)
- +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
- + struct ww_acquire_ctx *ww_ctx)
- +{
- +#ifdef CONFIG_DEBUG_MUTEXES
- + /*
- + * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
- + * but released with a normal mutex_unlock in this call.
- + *
- + * This should never happen, always use ww_mutex_unlock.
- + */
- + DEBUG_LOCKS_WARN_ON(ww->ctx);
- +
- + /*
- + * Not quite done after calling ww_acquire_done() ?
- + */
- + DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
- +
- + if (ww_ctx->contending_lock) {
- + /*
- + * After -EDEADLK you tried to
- + * acquire a different ww_mutex? Bad!
- + */
- + DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
- +
- + /*
- + * You called ww_mutex_lock after receiving -EDEADLK,
- + * but 'forgot' to unlock everything else first?
- + */
- + DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
- + ww_ctx->contending_lock = NULL;
- + }
- +
- + /*
- + * Naughty, using a different class will lead to undefined behavior!
- + */
- + DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
- +#endif
- + ww_ctx->acquired++;
- +}
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static void ww_mutex_account_lock(struct rt_mutex *lock,
- + struct ww_acquire_ctx *ww_ctx)
- {
- - struct rt_mutex_waiter waiter;
- - unsigned long flags;
- - int ret = 0;
- + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
- + struct rt_mutex_waiter *waiter, *n;
-
- - debug_rt_mutex_init_waiter(&waiter);
- - RB_CLEAR_NODE(&waiter.pi_tree_entry);
- - RB_CLEAR_NODE(&waiter.tree_entry);
- + /*
- + * This branch gets optimized out for the common case,
- + * and is only important for ww_mutex_lock.
- + */
- + ww_mutex_lock_acquired(ww, ww_ctx);
- + ww->ctx = ww_ctx;
-
- /*
- - * Technically we could use raw_spin_[un]lock_irq() here, but this can
- - * be called in early boot if the cmpxchg() fast path is disabled
- - * (debug, no architecture support). In this case we will acquire the
- - * rtmutex with lock->wait_lock held. But we cannot unconditionally
- - * enable interrupts in that early boot case. So we need to use the
- - * irqsave/restore variants.
- + * Give any possible sleeping processes the chance to wake up,
- + * so they can recheck if they have to back off.
- */
- - raw_spin_lock_irqsave(&lock->wait_lock, flags);
- + rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
- + tree_entry) {
- + /* XXX debug rt mutex waiter wakeup */
- +
- + BUG_ON(waiter->lock != lock);
- + rt_mutex_wake_waiter(waiter);
- + }
- +}
- +
- +#else
- +
- +static void ww_mutex_account_lock(struct rt_mutex *lock,
- + struct ww_acquire_ctx *ww_ctx)
- +{
- + BUG();
- +}
- +#endif
- +
- +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
- + struct hrtimer_sleeper *timeout,
- + enum rtmutex_chainwalk chwalk,
- + struct ww_acquire_ctx *ww_ctx,
- + struct rt_mutex_waiter *waiter)
- +{
- + int ret;
-
- /* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock, current, NULL)) {
- - raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- + if (ww_ctx)
- + ww_mutex_account_lock(lock, ww_ctx);
- return 0;
- }
-
- @@ -1261,17 +1779,27 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
- if (unlikely(timeout))
- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
- - ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
- + ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
- - if (likely(!ret))
- + if (likely(!ret)) {
- /* sleep on the mutex */
- - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
- + ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
- + ww_ctx);
- + } else if (ww_ctx) {
- + /* ww_mutex received EDEADLK, let it become EALREADY */
- + ret = __mutex_lock_check_stamp(lock, ww_ctx);
- + BUG_ON(!ret);
- + }
-
- if (unlikely(ret)) {
- __set_current_state(TASK_RUNNING);
- if (rt_mutex_has_waiters(lock))
- - remove_waiter(lock, &waiter);
- - rt_mutex_handle_deadlock(ret, chwalk, &waiter);
- + remove_waiter(lock, waiter);
- + /* ww_mutex want to report EDEADLK/EALREADY, let them */
- + if (!ww_ctx)
- + rt_mutex_handle_deadlock(ret, chwalk, waiter);
- + } else if (ww_ctx) {
- + ww_mutex_account_lock(lock, ww_ctx);
- }
-
- /*
- @@ -1279,6 +1807,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
- * unconditionally. We might have to fix that up.
- */
- fixup_rt_mutex_waiters(lock);
- + return ret;
- +}
- +
- +/*
- + * Slow path lock function:
- + */
- +static int __sched
- +rt_mutex_slowlock(struct rt_mutex *lock, int state,
- + struct hrtimer_sleeper *timeout,
- + enum rtmutex_chainwalk chwalk,
- + struct ww_acquire_ctx *ww_ctx)
- +{
- + struct rt_mutex_waiter waiter;
- + unsigned long flags;
- + int ret = 0;
- +
- + rt_mutex_init_waiter(&waiter, false);
- +
- + /*
- + * Technically we could use raw_spin_[un]lock_irq() here, but this can
- + * be called in early boot if the cmpxchg() fast path is disabled
- + * (debug, no architecture support). In this case we will acquire the
- + * rtmutex with lock->wait_lock held. But we cannot unconditionally
- + * enable interrupts in that early boot case. So we need to use the
- + * irqsave/restore variants.
- + */
- + raw_spin_lock_irqsave(&lock->wait_lock, flags);
- +
- + ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
- + &waiter);
-
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- @@ -1328,10 +1886,12 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
-
- /*
- * Slow path to release a rt-mutex.
- - * Return whether the current task needs to undo a potential priority boosting.
- + *
- + * Return whether the current task needs to call rt_mutex_postunlock().
- */
- static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
- - struct wake_q_head *wake_q)
- + struct wake_q_head *wake_q,
- + struct wake_q_head *wake_sleeper_q)
- {
- unsigned long flags;
-
- @@ -1340,8 +1900,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-
- debug_rt_mutex_unlock(lock);
-
- - rt_mutex_deadlock_account_unlock(current);
- -
- /*
- * We must be careful here if the fast path is enabled. If we
- * have no waiters queued we cannot set owner to NULL here
- @@ -1387,12 +1945,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
- *
- * Queue the next waiter for wakeup once we release the wait_lock.
- */
- - mark_wakeup_next_waiter(wake_q, lock);
- -
- + mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- - /* check PI boosting */
- - return true;
- + return true; /* call rt_mutex_postunlock() */
- }
-
- /*
- @@ -1403,63 +1959,97 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
- */
- static inline int
- rt_mutex_fastlock(struct rt_mutex *lock, int state,
- + struct ww_acquire_ctx *ww_ctx,
- int (*slowfn)(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- - enum rtmutex_chainwalk chwalk))
- + enum rtmutex_chainwalk chwalk,
- + struct ww_acquire_ctx *ww_ctx))
- {
- - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
- - rt_mutex_deadlock_account_lock(lock, current);
- + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 0;
- - } else
- - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
- +
- + /*
- + * If rt_mutex blocks, the function sched_submit_work will not call
- + * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
- + * We must call blk_schedule_flush_plug here; if we don't call it,
- + * a deadlock in device mapper may happen.
- + */
- + if (unlikely(blk_needs_flush_plug(current)))
- + blk_schedule_flush_plug(current);
- +
- + return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
- }
-
- static inline int
- rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
- + struct ww_acquire_ctx *ww_ctx,
- int (*slowfn)(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- - enum rtmutex_chainwalk chwalk))
- + enum rtmutex_chainwalk chwalk,
- + struct ww_acquire_ctx *ww_ctx))
- {
- if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
- - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
- - rt_mutex_deadlock_account_lock(lock, current);
- + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 0;
- - } else
- - return slowfn(lock, state, timeout, chwalk);
- +
- + if (unlikely(blk_needs_flush_plug(current)))
- + blk_schedule_flush_plug(current);
- +
- + return slowfn(lock, state, timeout, chwalk, ww_ctx);
- }
-
- static inline int
- rt_mutex_fasttrylock(struct rt_mutex *lock,
- int (*slowfn)(struct rt_mutex *lock))
- {
- - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
- - rt_mutex_deadlock_account_lock(lock, current);
- + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 1;
- - }
- +
- return slowfn(lock);
- }
-
- +/*
- + * Performs the wakeup of the top-waiter and re-enables preemption.
- + */
- +void rt_mutex_postunlock(struct wake_q_head *wake_q,
- + struct wake_q_head *wq_sleeper)
- +{
- + wake_up_q(wake_q);
- + wake_up_q_sleeper(wq_sleeper);
- +
- + /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
- + preempt_enable();
- +}
- +
- static inline void
- rt_mutex_fastunlock(struct rt_mutex *lock,
- bool (*slowfn)(struct rt_mutex *lock,
- - struct wake_q_head *wqh))
- + struct wake_q_head *wqh,
- + struct wake_q_head *wq_sleeper))
- {
- WAKE_Q(wake_q);
- + WAKE_Q(wake_sleeper_q);
-
- - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
- - rt_mutex_deadlock_account_unlock(current);
- + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
- + return;
-
- - } else {
- - bool deboost = slowfn(lock, &wake_q);
- + if (slowfn(lock, &wake_q, &wake_sleeper_q))
- + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
- +}
-
- - wake_up_q(&wake_q);
- +/**
- + * rt_mutex_lock_state - lock a rt_mutex with a given state
- + *
- + * @lock: The rt_mutex to be locked
- + * @state: The state to set when blocking on the rt_mutex
- + */
- +int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
- +{
- + might_sleep();
-
- - /* Undo pi boosting if necessary: */
- - if (deboost)
- - rt_mutex_adjust_prio(current);
- - }
- + return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
- }
-
- /**
- @@ -1469,15 +2059,13 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
- */
- void __sched rt_mutex_lock(struct rt_mutex *lock)
- {
- - might_sleep();
- -
- - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
- + rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock);
-
- /**
- * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
- *
- * @lock: the rt_mutex to be locked
- *
- * Returns:
- @@ -1486,23 +2074,32 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
- */
- int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
- {
- - might_sleep();
- -
- - return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
- + return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-
- -/*
- - * Futex variant with full deadlock detection.
- +/**
- + * rt_mutex_lock_killable - lock a rt_mutex killable
- + *
- + * @lock: the rt_mutex to be locked
- + *
- + * Returns:
- + * 0 on success
- + * -EINTR when interrupted by a signal
- */
- -int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
- - struct hrtimer_sleeper *timeout)
- +int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
- {
- - might_sleep();
- + return rt_mutex_lock_state(lock, TASK_KILLABLE);
- +}
- +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-
- - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
- - RT_MUTEX_FULL_CHAINWALK,
- - rt_mutex_slowlock);
- +/*
- + * Futex variant, must not use fastpath.
- + */
- +int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
- +{
- + return rt_mutex_slowtrylock(lock);
- }
-
- /**
- @@ -1525,6 +2122,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
-
- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
- RT_MUTEX_MIN_CHAINWALK,
- + NULL,
- rt_mutex_slowlock);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
- @@ -1542,7 +2140,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
- */
- int __sched rt_mutex_trylock(struct rt_mutex *lock)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (WARN_ON_ONCE(in_irq() || in_nmi()))
- +#else
- if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
- +#endif
- return 0;
-
- return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
- @@ -1560,21 +2162,53 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
- }
- EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-
- +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
- + struct wake_q_head *wake_q,
- + struct wake_q_head *wq_sleeper)
- +{
- + lockdep_assert_held(&lock->wait_lock);
- +
- + debug_rt_mutex_unlock(lock);
- +
- + if (!rt_mutex_has_waiters(lock)) {
- + lock->owner = NULL;
- + return false; /* done */
- + }
- +
- + /*
- + * We've already deboosted, mark_wakeup_next_waiter() will
- + * retain preempt_disabled when we drop the wait_lock, to
- + * avoid inversion prior to the wakeup. preempt_disable()
- + * therein pairs with rt_mutex_postunlock().
- + */
- + mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
- +
- + return true; /* call postunlock() */
- +}
- +
- /**
- - * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
- - * @lock: the rt_mutex to be unlocked
- - *
- - * Returns: true/false indicating whether priority adjustment is
- - * required or not.
- + * Futex variant: since the futex variants do not use the fast-path, this
- + * can be simple and will not need to retry.
- */
- -bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
- - struct wake_q_head *wqh)
- +bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
- + struct wake_q_head *wake_q,
- + struct wake_q_head *wq_sleeper)
- {
- - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
- - rt_mutex_deadlock_account_unlock(current);
- - return false;
- - }
- - return rt_mutex_slowunlock(lock, wqh);
- + return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
- +}
- +
- +void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
- +{
- + WAKE_Q(wake_q);
- + WAKE_Q(wake_sleeper_q);
- + bool postunlock;
- +
- + raw_spin_lock_irq(&lock->wait_lock);
- + postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
- + raw_spin_unlock_irq(&lock->wait_lock);
- +
- + if (postunlock)
- + rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
- }
-
- /**
- @@ -1607,13 +2241,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
- void __rt_mutex_init(struct rt_mutex *lock, const char *name)
- {
- lock->owner = NULL;
- - raw_spin_lock_init(&lock->wait_lock);
- lock->waiters = RB_ROOT;
- lock->waiters_leftmost = NULL;
-
- debug_rt_mutex_init(lock, name);
- }
- -EXPORT_SYMBOL_GPL(__rt_mutex_init);
- +EXPORT_SYMBOL(__rt_mutex_init);
-
- /**
- * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
- @@ -1628,10 +2261,9 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
- void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
- struct task_struct *proxy_owner)
- {
- - __rt_mutex_init(lock, NULL);
- + rt_mutex_init(lock);
- debug_rt_mutex_proxy_lock(lock, proxy_owner);
- rt_mutex_set_owner(lock, proxy_owner);
- - rt_mutex_deadlock_account_lock(lock, proxy_owner);
- }
-
- /**
- @@ -1647,34 +2279,44 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- {
- debug_rt_mutex_proxy_unlock(lock);
- rt_mutex_set_owner(lock, NULL);
- - rt_mutex_deadlock_account_unlock(proxy_owner);
- }
-
- -/**
- - * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- - * @lock: the rt_mutex to take
- - * @waiter: the pre-initialized rt_mutex_waiter
- - * @task: the task to prepare
- - *
- - * Returns:
- - * 0 - task blocked on lock
- - * 1 - acquired the lock for task, caller should wake it up
- - * <0 - error
- - *
- - * Special API call for FUTEX_REQUEUE_PI support.
- - */
- -int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- +int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- struct rt_mutex_waiter *waiter,
- struct task_struct *task)
- {
- int ret;
-
- - raw_spin_lock_irq(&lock->wait_lock);
- -
- - if (try_to_take_rt_mutex(lock, task, NULL)) {
- - raw_spin_unlock_irq(&lock->wait_lock);
- + if (try_to_take_rt_mutex(lock, task, NULL))
- return 1;
- +
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + /*
- + * In PREEMPT_RT there's an added race.
- + * If the task that we are about to requeue times out,
- + * it can set PI_WAKEUP_INPROGRESS. This tells the requeue
- + * to skip this task. But right after the task sets
- + * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
- + * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
- + * This will replace the PI_WAKEUP_INPROGRESS with the actual
- + * lock that it blocks on. We *must not* place this task
- + * on this proxy lock in that case.
- + *
- + * To prevent this race, we first take the task's pi_lock
- + * and check if it has updated its pi_blocked_on. If it has,
- + * we assume that it woke up and we return -EAGAIN.
- + * Otherwise, we set the task's pi_blocked_on to
- + * PI_REQUEUE_INPROGRESS, so that if the task is waking up
- + * it will know that we are in the process of requeuing it.
- + */
- + raw_spin_lock(&task->pi_lock);
- + if (task->pi_blocked_on) {
- + raw_spin_unlock(&task->pi_lock);
- + return -EAGAIN;
- }
- + task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
- + raw_spin_unlock(&task->pi_lock);
- +#endif
-
- /* We enforce deadlock detection for futexes */
- ret = task_blocks_on_rt_mutex(lock, waiter, task,
- @@ -1690,16 +2332,40 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- ret = 0;
- }
-
- - if (unlikely(ret))
- + if (ret && rt_mutex_has_waiters(lock))
- remove_waiter(lock, waiter);
-
- - raw_spin_unlock_irq(&lock->wait_lock);
- -
- debug_rt_mutex_print_deadlock(waiter);
-
- return ret;
- }
-
- +/**
- + * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- + * @lock: the rt_mutex to take
- + * @waiter: the pre-initialized rt_mutex_waiter
- + * @task: the task to prepare
- + *
- + * Returns:
- + * 0 - task blocked on lock
- + * 1 - acquired the lock for task, caller should wake it up
- + * <0 - error
- + *
- + * Special API call for FUTEX_REQUEUE_PI support.
- + */
- +int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter,
- + struct task_struct *task)
- +{
- + int ret;
- +
- + raw_spin_lock_irq(&lock->wait_lock);
- + ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
- + raw_spin_unlock_irq(&lock->wait_lock);
- +
- + return ret;
- +}
- +
- /**
- * rt_mutex_next_owner - return the next owner of the lock
- *
- @@ -1721,36 +2387,106 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
- }
-
- /**
- - * rt_mutex_finish_proxy_lock() - Complete lock acquisition
- + * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
- * @lock: the rt_mutex we were woken on
- * @to: the timeout, null if none. hrtimer should already have
- * been started.
- * @waiter: the pre-initialized rt_mutex_waiter
- *
- - * Complete the lock acquisition started our behalf by another thread.
- + * Wait for the lock acquisition started on our behalf by
- + * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
- + * rt_mutex_cleanup_proxy_lock().
- *
- * Returns:
- * 0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT
- *
- - * Special API call for PI-futex requeue support
- + * Special API call for PI-futex support
- */
- -int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
- +int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *to,
- struct rt_mutex_waiter *waiter)
- {
- + struct task_struct *tsk = current;
- int ret;
-
- raw_spin_lock_irq(&lock->wait_lock);
- -
- + /* sleep on the mutex */
- set_current_state(TASK_INTERRUPTIBLE);
- + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
- + /*
- + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
- + * have to fix that up.
- + */
- + fixup_rt_mutex_waiters(lock);
-
- - /* sleep on the mutex */
- - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
- + /*
- + * RT has a problem here when the wait got interrupted by a timeout
- + * or a signal. task->pi_blocked_on is still set. The task must
- + * acquire the hash bucket lock when returning from this function.
- + *
- + * If the hash bucket lock is contended then the
- + * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
- + * task_blocks_on_rt_mutex() will trigger. This can be avoided by
- + * clearing task->pi_blocked_on which removes the task from the
- + * boosting chain of the rtmutex. That's correct because the task
- + * is no longer blocked on it.
- + */
- + if (ret) {
- + raw_spin_lock(&tsk->pi_lock);
- + tsk->pi_blocked_on = NULL;
- + raw_spin_unlock(&tsk->pi_lock);
- + }
- + raw_spin_unlock_irq(&lock->wait_lock);
-
- - if (unlikely(ret))
- - remove_waiter(lock, waiter);
- + return ret;
- +}
- +
- +/**
- + * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
- + * @lock: the rt_mutex we were woken on
- + * @waiter: the pre-initialized rt_mutex_waiter
- + *
- + * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
- + *
- + * Unless we acquired the lock, we're still enqueued on the wait-list and can
- + * in fact still be granted ownership until we're removed. Therefore we can
- + * find we are in fact the owner and must disregard the
- + * rt_mutex_wait_proxy_lock() failure.
- + *
- + * Returns:
- + * true - did the cleanup, we are done.
- + * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
- + * caller should disregard its return value.
- + *
- + * Special API call for PI-futex support
- + */
- +bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter)
- +{
- + bool cleanup = false;
-
- + raw_spin_lock_irq(&lock->wait_lock);
- + /*
- + * Do an unconditional try-lock; this deals with the lock stealing
- + * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
- + * sets a NULL owner.
- + *
- + * We're not interested in the return value, because the subsequent
- + * test on rt_mutex_owner() will infer that. If the trylock succeeded,
- + * we will own the lock and it will have removed the waiter. If we
- + * failed the trylock, we're still not owner and we need to remove
- + * ourselves.
- + */
- + try_to_take_rt_mutex(lock, current, waiter);
- + /*
- + * Unless we're the owner, we're still enqueued on the wait_list.
- + * So check if we became owner, if not, take us off the wait_list.
- + */
- + if (rt_mutex_owner(lock) != current) {
- + remove_waiter(lock, waiter);
- + cleanup = true;
- + }
- /*
- * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
- * have to fix that up.
- @@ -1759,5 +2495,91 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
-
- raw_spin_unlock_irq(&lock->wait_lock);
-
- + return cleanup;
- +}
- +
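Putting the proxy calls together: the requeueing code starts the acquisition on the waiter's behalf, the waiter later blocks in its own context, and the cleanup runs only on failure. A condensed sketch of that flow (error paths and the futex hash-bucket locking are omitted; lock, to, task and rt_waiter are assumed to be set up by the caller):

    /* requeueing context: enqueue task as a waiter, or take the lock for it */
    ret = rt_mutex_start_proxy_lock(lock, &rt_waiter, task);
    if (ret == 1)
            ret = 0;        /* acquired for task; caller wakes it up */

    /* later, in task's own context: wait for the handover */
    if (!ret)
            ret = rt_mutex_wait_proxy_lock(lock, to, &rt_waiter);

    /* on failure, clean up -- unless ownership was granted meanwhile */
    if (ret && !rt_mutex_cleanup_proxy_lock(lock, &rt_waiter))
            ret = 0;        /* we own the lock after all */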
- +static inline int
- +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
- +{
- +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
- + unsigned tmp;
- +
- + if (ctx->deadlock_inject_countdown-- == 0) {
- + tmp = ctx->deadlock_inject_interval;
- + if (tmp > UINT_MAX/4)
- + tmp = UINT_MAX;
- + else
- + tmp = tmp*2 + tmp + tmp/2;
- +
- + ctx->deadlock_inject_interval = tmp;
- + ctx->deadlock_inject_countdown = tmp;
- + ctx->contending_lock = lock;
- +
- + ww_mutex_unlock(lock);
- +
- + return -EDEADLK;
- + }
- +#endif
- +
- + return 0;
- +}
- +
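The interval update above is integer arithmetic for tmp = 3.5 * tmp; to make the growth concrete (purely illustrative):

    tmp =  1  ->   1*2 +  1 + 0 =   3
    tmp =  3  ->   3*2 +  3 + 1 =  10
    tmp = 10  ->  10*2 + 10 + 5 =  35

so every injected -EDEADLK roughly triples the interval, saturating at UINT_MAX once tmp exceeds UINT_MAX/4.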
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +int __sched
- +__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
- +{
- + int ret;
- +
- + might_sleep();
- +
- + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
- + ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
- + if (ret)
- + mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- + else if (!ret && ww_ctx->acquired > 1)
- + return ww_mutex_deadlock_injection(lock, ww_ctx);
- +
- + return ret;
- +}
- +EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
- +
- +int __sched
- +__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
- +{
- + int ret;
- +
- + might_sleep();
- +
- + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
- + ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
- + if (ret)
- + mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- + else if (!ret && ww_ctx->acquired > 1)
- + return ww_mutex_deadlock_injection(lock, ww_ctx);
- +
- return ret;
- }
- +EXPORT_SYMBOL_GPL(__ww_mutex_lock);
- +
- +void __sched ww_mutex_unlock(struct ww_mutex *lock)
- +{
- + int nest = !!lock->ctx;
- +
- + /*
- + * The unlocking fastpath is the 0->1 transition from 'locked'
- + * into 'unlocked' state:
- + */
- + if (nest) {
- +#ifdef CONFIG_DEBUG_MUTEXES
- + DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
- +#endif
- + if (lock->ctx->acquired > 0)
- + lock->ctx->acquired--;
- + lock->ctx = NULL;
- + }
- +
- + mutex_release(&lock->base.dep_map, nest, _RET_IP_);
- + rt_mutex_unlock(&lock->base.lock);
- +}
- +EXPORT_SYMBOL(ww_mutex_unlock);
- +#endif
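With these PREEMPT_RT_FULL entry points in place, ww_mutex users keep the stock wound/wait pattern; a condensed sketch, assuming the unchanged generic API (DEFINE_WW_CLASS, ww_acquire_*) and hypothetical objects obj_a/obj_b -- the full backoff loop is elided, see Documentation/locking/ww-mutex-design.txt:

    static DEFINE_WW_CLASS(demo_ww_class);     /* hypothetical lock class */

    struct ww_acquire_ctx ctx;
    int ret;

    ww_acquire_init(&ctx, &demo_ww_class);

    ret = ww_mutex_lock(&obj_a->lock, &ctx);   /* first lock cannot deadlock */
    if (!ret)
            ret = ww_mutex_lock(&obj_b->lock, &ctx);
    if (ret == -EDEADLK) {
            /* wounded: drop what we hold, sleep on the contended lock,
             * then redo the whole sequence */
            ww_mutex_unlock(&obj_a->lock);
            ww_mutex_lock_slow(&obj_b->lock, &ctx);
    }

On RT the -EDEADLK/-EALREADY reporting comes straight out of rt_mutex_slowlock() via __mutex_lock_check_stamp(), as wired up in the slowlock hunks above.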
- diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
- index c4060584c407..6607802efa8b 100644
- --- a/kernel/locking/rtmutex.h
- +++ b/kernel/locking/rtmutex.h
- @@ -11,8 +11,6 @@
- */
-
- #define rt_mutex_deadlock_check(l) (0)
- -#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
- -#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
- #define debug_rt_mutex_init_waiter(w) do { } while (0)
- #define debug_rt_mutex_free_waiter(w) do { } while (0)
- #define debug_rt_mutex_lock(l) do { } while (0)
- diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
- index e317e1cbb3eb..64d89d780059 100644
- --- a/kernel/locking/rtmutex_common.h
- +++ b/kernel/locking/rtmutex_common.h
- @@ -27,12 +27,14 @@ struct rt_mutex_waiter {
- struct rb_node pi_tree_entry;
- struct task_struct *task;
- struct rt_mutex *lock;
- + bool savestate;
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- unsigned long ip;
- struct pid *deadlock_task_pid;
- struct rt_mutex *deadlock_lock;
- #endif
- int prio;
- + u64 deadline;
- };
-
- /*
- @@ -98,21 +100,45 @@ enum rtmutex_chainwalk {
- /*
- * PI-futex support (proxy locking functions, etc.):
- */
- +#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
- +#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
- +
- extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
- extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
- extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
- struct task_struct *proxy_owner);
- +extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
- +extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter,
- + struct task_struct *task);
- extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- struct rt_mutex_waiter *waiter,
- struct task_struct *task);
- -extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
- - struct hrtimer_sleeper *to,
- - struct rt_mutex_waiter *waiter);
- -extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
- -extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
- - struct wake_q_head *wqh);
- -extern void rt_mutex_adjust_prio(struct task_struct *task);
- +extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
- + struct hrtimer_sleeper *to,
- + struct rt_mutex_waiter *waiter);
- +extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
- + struct rt_mutex_waiter *waiter);
- +
- +extern int rt_mutex_futex_trylock(struct rt_mutex *l);
- +
- +extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
- +extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
- + struct wake_q_head *wqh,
- + struct wake_q_head *wq_sleeper);
- +
- +extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
- + struct wake_q_head *wq_sleeper);
- +
- +/* RW semaphore special interface */
- +struct ww_acquire_ctx;
- +
- +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
- + struct hrtimer_sleeper *timeout,
- + enum rtmutex_chainwalk chwalk,
- + struct ww_acquire_ctx *ww_ctx,
- + struct rt_mutex_waiter *waiter);
-
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- # include "rtmutex-debug.h"
- diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
- new file mode 100644
- index 000000000000..4a708ffcded6
- --- /dev/null
- +++ b/kernel/locking/rwsem-rt.c
- @@ -0,0 +1,268 @@
- +/*
- + */
- +#include <linux/rwsem.h>
- +#include <linux/sched.h>
- +#include <linux/export.h>
- +
- +#include "rtmutex_common.h"
- +
- +/*
- + * RT-specific reader/writer semaphores
- + *
- + * down_write()
- + * 1) Lock sem->rtmutex
- + * 2) Remove the reader BIAS to force readers into the slow path
- + * 3) Wait until all readers have left the critical region
- + * 4) Mark it write locked
- + *
- + * up_write()
- + * 1) Remove the write locked marker
- + * 2) Set the reader BIAS so readers can use the fast path again
- + * 3) Unlock sem->rtmutex to release blocked readers
- + *
- + * down_read()
- + * 1) Try fast path acquisition (reader BIAS is set)
- + * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag
- + * 3) If !writelocked, acquire it for read
- + * 4) If writelocked, block on sem->rtmutex
- + * 5) unlock sem->rtmutex, goto 1)
- + *
- + * up_read()
- + * 1) Try fast path release (reader count != 1)
- + * 2) Wake the writer waiting in down_write()#3
- + *
- + * down_read()#3 has the consequence that rw semaphores on RT are not writer
- + * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
- + * are subject to the rtmutex priority/DL inheritance mechanism.
- + *
- + * It's possible to make the rw semaphores writer fair by keeping a list of
- + * active readers. A blocked writer would force all newly incoming readers to
- + * block on the rtmutex, but the rtmutex would have to be proxy locked for one
- + * reader after the other. We can't use multi-reader inheritance because there
- + * is no way to support that with SCHED_DEADLINE. Implementing the one by one
- + * reader boosting/handover mechanism is a major surgery for a very dubious
- + * value.
- + *
- + * The risk of writer starvation is there, but the pathological use cases
- + * which trigger it are not necessarily the typical RT workloads.
- + */
- +
- +void __rwsem_init(struct rw_semaphore *sem, const char *name,
- + struct lock_class_key *key)
- +{
- +#ifdef CONFIG_DEBUG_LOCK_ALLOC
- + /*
- + * Make sure we are not reinitializing a held semaphore:
- + */
- + debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- + lockdep_init_map(&sem->dep_map, name, key, 0);
- +#endif
- + atomic_set(&sem->readers, READER_BIAS);
- +}
- +EXPORT_SYMBOL(__rwsem_init);
- +
- +int __down_read_trylock(struct rw_semaphore *sem)
- +{
- + int r, old;
- +
- + /*
- + * Increment the reader count if sem->readers < 0, i.e. READER_BIAS is
- + * set.
- + */
- + for (r = atomic_read(&sem->readers); r < 0;) {
- + old = atomic_cmpxchg(&sem->readers, r, r + 1);
- + if (likely(old == r))
- + return 1;
- + r = old;
- + }
- + return 0;
- +}
- +
- +void __sched __down_read(struct rw_semaphore *sem)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- + struct rt_mutex_waiter waiter;
- +
- + if (__down_read_trylock(sem))
- + return;
- +
- + might_sleep();
- + raw_spin_lock_irq(&m->wait_lock);
- + /*
- + * Allow readers as long as the writer has not completely
- + * acquired the semaphore for write.
- + */
- + if (atomic_read(&sem->readers) != WRITER_BIAS) {
- + atomic_inc(&sem->readers);
- + raw_spin_unlock_irq(&m->wait_lock);
- + return;
- + }
- +
- + /*
- + * Call into the slow lock path with the rtmutex->wait_lock
- + * held, so this can't result in the following race:
- + *
- + * Reader1 Reader2 Writer
- + * down_read()
- + * down_write()
- + * rtmutex_lock(m)
- + * swait()
- + * down_read()
- + * unlock(m->wait_lock)
- + * up_read()
- + * swake()
- + * lock(m->wait_lock)
- + * sem->writelocked=true
- + * unlock(m->wait_lock)
- + *
- + * up_write()
- + * sem->writelocked=false
- + * rtmutex_unlock(m)
- + * down_read()
- + * down_write()
- + * rtmutex_lock(m)
- + * swait()
- + * rtmutex_lock(m)
- + *
- + * That would put Reader1 behind the writer waiting on
- + * Reader2 to call up_read() which might be unbound.
- + */
- + rt_mutex_init_waiter(&waiter, false);
- + rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
- + RT_MUTEX_MIN_CHAINWALK, NULL,
- + &waiter);
- + /*
- + * The slowlock() above is guaranteed to return with the rtmutex
- + * now held, so there can't be a writer active. Increment the reader
- + * count and immediately drop the rtmutex again.
- + */
- + atomic_inc(&sem->readers);
- + raw_spin_unlock_irq(&m->wait_lock);
- + rt_mutex_unlock(m);
- +
- + debug_rt_mutex_free_waiter(&waiter);
- +}
- +
- +void __up_read(struct rw_semaphore *sem)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- + struct task_struct *tsk;
- +
- + /*
- + * sem->readers can only hit 0 when a writer is waiting for the
- + * active readers to leave the critical region.
- + */
- + if (!atomic_dec_and_test(&sem->readers))
- + return;
- +
- + might_sleep();
- + raw_spin_lock_irq(&m->wait_lock);
- + /*
- + * Wake the writer, i.e. the rtmutex owner. It might release the
- + * rtmutex concurrently in the fast path (due to a signal), but to
- + * clean up the rwsem it needs to acquire m->wait_lock. The worst
- + * case which can happen is a spurious wakeup.
- + */
- + tsk = rt_mutex_owner(m);
- + if (tsk)
- + wake_up_process(tsk);
- +
- + raw_spin_unlock_irq(&m->wait_lock);
- +}
- +
- +static void __up_write_unlock(struct rw_semaphore *sem, int bias,
- + unsigned long flags)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- +
- + atomic_add(READER_BIAS - bias, &sem->readers);
- + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
- + rt_mutex_unlock(m);
- +}
- +
- +static int __sched __down_write_common(struct rw_semaphore *sem, int state)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- + unsigned long flags;
- +
- + /* Take the rtmutex as a first step */
- + if (rt_mutex_lock_state(m, state))
- + return -EINTR;
- +
- + /* Force readers into slow path */
- + atomic_sub(READER_BIAS, &sem->readers);
- + might_sleep();
- +
- + set_current_state(state);
- + for (;;) {
- + raw_spin_lock_irqsave(&m->wait_lock, flags);
- + /* Have all readers left the critical region? */
- + if (!atomic_read(&sem->readers)) {
- + atomic_set(&sem->readers, WRITER_BIAS);
- + __set_current_state(TASK_RUNNING);
- + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
- + return 0;
- + }
- +
- + if (signal_pending_state(state, current)) {
- + __set_current_state(TASK_RUNNING);
- + __up_write_unlock(sem, 0, flags);
- + return -EINTR;
- + }
- + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
- +
- + if (atomic_read(&sem->readers) != 0) {
- + schedule();
- + set_current_state(state);
- + }
- + }
- +}
- +
- +void __sched __down_write(struct rw_semaphore *sem)
- +{
- + __down_write_common(sem, TASK_UNINTERRUPTIBLE);
- +}
- +
- +int __sched __down_write_killable(struct rw_semaphore *sem)
- +{
- + return __down_write_common(sem, TASK_KILLABLE);
- +}
- +
- +int __down_write_trylock(struct rw_semaphore *sem)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- + unsigned long flags;
- +
- + if (!rt_mutex_trylock(m))
- + return 0;
- +
- + atomic_sub(READER_BIAS, &sem->readers);
- +
- + raw_spin_lock_irqsave(&m->wait_lock, flags);
- + if (!atomic_read(&sem->readers)) {
- + atomic_set(&sem->readers, WRITER_BIAS);
- + raw_spin_unlock_irqrestore(&m->wait_lock, flags);
- + return 1;
- + }
- + __up_write_unlock(sem, 0, flags);
- + return 0;
- +}
- +
- +void __up_write(struct rw_semaphore *sem)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- + unsigned long flags;
- +
- + raw_spin_lock_irqsave(&m->wait_lock, flags);
- + __up_write_unlock(sem, WRITER_BIAS, flags);
- +}
- +
- +void __downgrade_write(struct rw_semaphore *sem)
- +{
- + struct rt_mutex *m = &sem->rtmutex;
- + unsigned long flags;
- +
- + raw_spin_lock_irqsave(&m->wait_lock, flags);
- + /* Release it and account current as reader */
- + __up_write_unlock(sem, WRITER_BIAS - 1, flags);
- +}
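To see why __up_write_unlock() restores the right state from each of its three call sites, it helps to track sem->readers explicitly. Assuming the bias constants from the RT rwsem header (READER_BIAS = 1U << 31, WRITER_BIAS = 1U << 30 -- values not shown in this patch):

    idle:                   readers == READER_BIAS         (read fast path open)
    down_write() step 2:    readers -= READER_BIAS         -> active reader count
    all readers left:       readers == 0, set WRITER_BIAS  (write locked)
    __up_write():           bias = WRITER_BIAS
                            readers += READER_BIAS - WRITER_BIAS -> READER_BIAS
    __downgrade_write():    bias = WRITER_BIAS - 1
                            readers += READER_BIAS - WRITER_BIAS + 1
                            -> READER_BIAS + 1  (BIAS plus current task as reader)
    signal in down_write(): bias = 0
                            readers += READER_BIAS
                            -> BIAS plus remaining readers; fast path reopened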
- diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
- index db3ccb1dd614..909779647bd1 100644
- --- a/kernel/locking/spinlock.c
- +++ b/kernel/locking/spinlock.c
- @@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
- * __[spin|read|write]_lock_bh()
- */
- BUILD_LOCK_OPS(spin, raw_spinlock);
- +
- +#ifndef CONFIG_PREEMPT_RT_FULL
- BUILD_LOCK_OPS(read, rwlock);
- BUILD_LOCK_OPS(write, rwlock);
- +#endif
-
- #endif
-
- @@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
- EXPORT_SYMBOL(_raw_spin_unlock_bh);
- #endif
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- #ifndef CONFIG_INLINE_READ_TRYLOCK
- int __lockfunc _raw_read_trylock(rwlock_t *lock)
- {
- @@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
- EXPORT_SYMBOL(_raw_write_unlock_bh);
- #endif
-
- +#endif /* !PREEMPT_RT_FULL */
- +
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
-
- void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
- diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
- index 9aa0fccd5d43..76d0b40d9193 100644
- --- a/kernel/locking/spinlock_debug.c
- +++ b/kernel/locking/spinlock_debug.c
- @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-
- EXPORT_SYMBOL(__raw_spin_lock_init);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key)
- {
- @@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
- }
-
- EXPORT_SYMBOL(__rwlock_init);
- +#endif
-
- static void spin_dump(raw_spinlock_t *lock, const char *msg)
- {
- @@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
- arch_spin_unlock(&lock->raw_lock);
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- static void rwlock_bug(rwlock_t *lock, const char *msg)
- {
- if (!debug_locks_off())
- @@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
- debug_write_unlock(lock);
- arch_write_unlock(&lock->raw_lock);
- }
- +
- +#endif
- diff --git a/kernel/module.c b/kernel/module.c
- index 0e54d5bf0097..f27764fbfa24 100644
- --- a/kernel/module.c
- +++ b/kernel/module.c
- @@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
- memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
- }
-
- -/**
- - * is_module_percpu_address - test whether address is from module static percpu
- - * @addr: address to test
- - *
- - * Test whether @addr belongs to module static percpu area.
- - *
- - * RETURNS:
- - * %true if @addr is from module static percpu area
- - */
- -bool is_module_percpu_address(unsigned long addr)
- +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
- {
- struct module *mod;
- unsigned int cpu;
- @@ -683,9 +674,15 @@ bool is_module_percpu_address(unsigned long addr)
- continue;
- for_each_possible_cpu(cpu) {
- void *start = per_cpu_ptr(mod->percpu, cpu);
- -
- - if ((void *)addr >= start &&
- - (void *)addr < start + mod->percpu_size) {
- + void *va = (void *)addr;
- +
- + if (va >= start && va < start + mod->percpu_size) {
- + if (can_addr) {
- + *can_addr = (unsigned long) (va - start);
- + *can_addr += (unsigned long)
- + per_cpu_ptr(mod->percpu,
- + get_boot_cpu_id());
- + }
- preempt_enable();
- return true;
- }
- @@ -696,6 +693,20 @@ bool is_module_percpu_address(unsigned long addr)
- return false;
- }
-
- +/**
- + * is_module_percpu_address - test whether address is from module static percpu
- + * @addr: address to test
- + *
- + * Test whether @addr belongs to module static percpu area.
- + *
- + * RETURNS:
- + * %true if @addr is from module static percpu area
- + */
- +bool is_module_percpu_address(unsigned long addr)
- +{
- + return __is_module_percpu_address(addr, NULL);
- +}
- +
- #else /* ... !CONFIG_SMP */
-
- static inline void __percpu *mod_percpu(struct module *mod)
- @@ -727,6 +738,11 @@ bool is_module_percpu_address(unsigned long addr)
- return false;
- }
-
- +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
- +{
- + return false;
- +}
- +
- #endif /* CONFIG_SMP */
-
- #define MODINFO_ATTR(field) \
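The new can_addr out-parameter folds any CPU's copy of a module static per-cpu variable onto the boot CPU's copy, so the same variable yields the same canonical address no matter which CPU's instance was passed in. A hedged sketch of the intended consumer (the RT tree's lockdep per-cpu handling is the motivating user; the surrounding code here is hypothetical):

    unsigned long canonical;

    /* normalize before using the address as a lookup key */
    if (__is_module_percpu_address(addr, &canonical))
            addr = canonical;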
- diff --git a/kernel/panic.c b/kernel/panic.c
- index dbec387099b1..b67a4803ff2b 100644
- --- a/kernel/panic.c
- +++ b/kernel/panic.c
- @@ -482,9 +482,11 @@ static u64 oops_id;
-
- static int init_oops_id(void)
- {
- +#ifndef CONFIG_PREEMPT_RT_FULL
- if (!oops_id)
- get_random_bytes(&oops_id, sizeof(oops_id));
- else
- +#endif
- oops_id++;
-
- return 0;
- diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
- index b26dbc48c75b..968255f27a33 100644
- --- a/kernel/power/hibernate.c
- +++ b/kernel/power/hibernate.c
- @@ -286,6 +286,8 @@ static int create_image(int platform_mode)
-
- local_irq_disable();
-
- + system_state = SYSTEM_SUSPEND;
- +
- error = syscore_suspend();
- if (error) {
- printk(KERN_ERR "PM: Some system devices failed to power down, "
- @@ -317,6 +319,7 @@ static int create_image(int platform_mode)
- syscore_resume();
-
- Enable_irqs:
- + system_state = SYSTEM_RUNNING;
- local_irq_enable();
-
- Enable_cpus:
- @@ -446,6 +449,7 @@ static int resume_target_kernel(bool platform_mode)
- goto Enable_cpus;
-
- local_irq_disable();
- + system_state = SYSTEM_SUSPEND;
-
- error = syscore_suspend();
- if (error)
- @@ -479,6 +483,7 @@ static int resume_target_kernel(bool platform_mode)
- syscore_resume();
-
- Enable_irqs:
- + system_state = SYSTEM_RUNNING;
- local_irq_enable();
-
- Enable_cpus:
- @@ -564,6 +569,7 @@ int hibernation_platform_enter(void)
- goto Enable_cpus;
-
- local_irq_disable();
- + system_state = SYSTEM_SUSPEND;
- syscore_suspend();
- if (pm_wakeup_pending()) {
- error = -EAGAIN;
- @@ -576,6 +582,7 @@ int hibernation_platform_enter(void)
-
- Power_up:
- syscore_resume();
- + system_state = SYSTEM_RUNNING;
- local_irq_enable();
-
- Enable_cpus:
- @@ -676,6 +683,10 @@ static int load_image_and_restore(void)
- return error;
- }
-
- +#ifndef CONFIG_SUSPEND
- +bool pm_in_action;
- +#endif
- +
- /**
- * hibernate - Carry out system hibernation, including saving the image.
- */
- @@ -689,6 +700,8 @@ int hibernate(void)
- return -EPERM;
- }
-
- + pm_in_action = true;
- +
- lock_system_sleep();
- /* The snapshot device should not be opened while we're running */
- if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
- @@ -766,6 +779,7 @@ int hibernate(void)
- atomic_inc(&snapshot_device_available);
- Unlock:
- unlock_system_sleep();
- + pm_in_action = false;
- return error;
- }
-
- diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
- index 6ccb08f57fcb..c8cbb5ed2fe3 100644
- --- a/kernel/power/suspend.c
- +++ b/kernel/power/suspend.c
- @@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
-
- + system_state = SYSTEM_SUSPEND;
- +
- error = syscore_suspend();
- if (!error) {
- *wakeup = pm_wakeup_pending();
- @@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
- syscore_resume();
- }
-
- + system_state = SYSTEM_RUNNING;
- +
- arch_suspend_enable_irqs();
- BUG_ON(irqs_disabled());
-
- @@ -527,6 +531,8 @@ static int enter_state(suspend_state_t state)
- return error;
- }
-
- +bool pm_in_action;
- +
- /**
- * pm_suspend - Externally visible function for suspending the system.
- * @state: System sleep state to enter.
- @@ -541,6 +547,8 @@ int pm_suspend(suspend_state_t state)
- if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
- return -EINVAL;
-
- + pm_in_action = true;
- +
- error = enter_state(state);
- if (error) {
- suspend_stats.fail++;
- @@ -548,6 +556,7 @@ int pm_suspend(suspend_state_t state)
- } else {
- suspend_stats.success++;
- }
- + pm_in_action = false;
- return error;
- }
- EXPORT_SYMBOL(pm_suspend);
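The SYSTEM_SUSPEND state lets low-level code recognize the window in which interrupts are legitimately disabled for suspend/resume. A hypothetical consumer, not part of this patch, might look like:

    /* skip diagnostics that would warn about IRQs being off */
    if (system_state == SYSTEM_SUSPEND)
            return;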
- diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
- index 9c5b231684d0..cf15bdb6855b 100644
- --- a/kernel/printk/printk.c
- +++ b/kernel/printk/printk.c
- @@ -351,6 +351,65 @@ __packed __aligned(4)
- */
- DEFINE_RAW_SPINLOCK(logbuf_lock);
-
- +#ifdef CONFIG_EARLY_PRINTK
- +struct console *early_console;
- +
- +static void early_vprintk(const char *fmt, va_list ap)
- +{
- + if (early_console) {
- + char buf[512];
- + int n = vscnprintf(buf, sizeof(buf), fmt, ap);
- +
- + early_console->write(early_console, buf, n);
- + }
- +}
- +
- +asmlinkage void early_printk(const char *fmt, ...)
- +{
- + va_list ap;
- +
- + va_start(ap, fmt);
- + early_vprintk(fmt, ap);
- + va_end(ap);
- +}
- +
- +/*
- + * This is independent of any log levels - a global
- + * kill switch that turns off all of printk.
- + *
- + * Used by the NMI watchdog if early-printk is enabled.
- + */
- +static bool __read_mostly printk_killswitch;
- +
- +static int __init force_early_printk_setup(char *str)
- +{
- + printk_killswitch = true;
- + return 0;
- +}
- +early_param("force_early_printk", force_early_printk_setup);
- +
- +void printk_kill(void)
- +{
- + printk_killswitch = true;
- +}
- +
- +#ifdef CONFIG_PRINTK
- +static int forced_early_printk(const char *fmt, va_list ap)
- +{
- + if (!printk_killswitch)
- + return 0;
- + early_vprintk(fmt, ap);
- + return 1;
- +}
- +#endif
- +
- +#else
- +static inline int forced_early_printk(const char *fmt, va_list ap)
- +{
- + return 0;
- +}
- +#endif
- +
- #ifdef CONFIG_PRINTK
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
- @@ -1337,6 +1396,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
- {
- char *text;
- int len = 0;
- + int attempts = 0;
-
- text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
- if (!text)
- @@ -1348,6 +1408,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
- u64 seq;
- u32 idx;
- enum log_flags prev;
- + int num_msg;
- +try_again:
- + attempts++;
- + if (attempts > 10) {
- + len = -EBUSY;
- + goto out;
- + }
- + num_msg = 0;
-
- /*
- * Find first record that fits, including all following records,
- @@ -1363,6 +1431,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
- prev = msg->flags;
- idx = log_next(idx);
- seq++;
- + num_msg++;
- + if (num_msg > 5) {
- + num_msg = 0;
- + raw_spin_unlock_irq(&logbuf_lock);
- + raw_spin_lock_irq(&logbuf_lock);
- + if (clear_seq < log_first_seq)
- + goto try_again;
- + }
- }
-
- /* move first record forward until length fits into the buffer */
- @@ -1376,6 +1452,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
- prev = msg->flags;
- idx = log_next(idx);
- seq++;
- + num_msg++;
- + if (num_msg > 5) {
- + num_msg = 0;
- + raw_spin_unlock_irq(&logbuf_lock);
- + raw_spin_lock_irq(&logbuf_lock);
- + if (clear_seq < log_first_seq)
- + goto try_again;
- + }
- }
-
- /* last message fitting into this dump */
- @@ -1416,6 +1500,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
- clear_seq = log_next_seq;
- clear_idx = log_next_idx;
- }
- +out:
- raw_spin_unlock_irq(&logbuf_lock);
-
- kfree(text);
- @@ -1569,6 +1654,12 @@ static void call_console_drivers(int level,
- if (!console_drivers)
- return;
-
- + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
- + if (in_irq() || in_nmi())
- + return;
- + }
- +
- + migrate_disable();
- for_each_console(con) {
- if (exclusive_console && con != exclusive_console)
- continue;
- @@ -1584,6 +1675,7 @@ static void call_console_drivers(int level,
- else
- con->write(con, text, len);
- }
- + migrate_enable();
- }
-
- /*
- @@ -1781,6 +1873,13 @@ asmlinkage int vprintk_emit(int facility, int level,
- /* cpu currently holding logbuf_lock in this function */
- static unsigned int logbuf_cpu = UINT_MAX;
-
- + /*
- + * Fall back to early_printk if a debugging subsystem has
- + * killed printk output
- + */
- + if (unlikely(forced_early_printk(fmt, args)))
- + return 1;
- +
- if (level == LOGLEVEL_SCHED) {
- level = LOGLEVEL_DEFAULT;
- in_sched = true;
- @@ -1885,13 +1984,23 @@ asmlinkage int vprintk_emit(int facility, int level,
-
- /* If called from the scheduler, we can not call up(). */
- if (!in_sched) {
- + int may_trylock = 1;
- +
- lockdep_off();
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + /*
- + * we can't take a sleeping lock with IRQs or preemption disabled
- + * so we can't print in these contexts
- + */
- + if (!(preempt_count() == 0 && !irqs_disabled()))
- + may_trylock = 0;
- +#endif
- /*
- * Try to acquire and then immediately release the console
- * semaphore. The release will print out buffers and wake up
- * /dev/kmsg and syslog() users.
- */
- - if (console_trylock())
- + if (may_trylock && console_trylock())
- console_unlock();
- lockdep_on();
- }
- @@ -2014,26 +2123,6 @@ DEFINE_PER_CPU(printk_func_t, printk_func);
-
- #endif /* CONFIG_PRINTK */
-
- -#ifdef CONFIG_EARLY_PRINTK
- -struct console *early_console;
- -
- -asmlinkage __visible void early_printk(const char *fmt, ...)
- -{
- - va_list ap;
- - char buf[512];
- - int n;
- -
- - if (!early_console)
- - return;
- -
- - va_start(ap, fmt);
- - n = vscnprintf(buf, sizeof(buf), fmt, ap);
- - va_end(ap);
- -
- - early_console->write(early_console, buf, n);
- -}
- -#endif
- -
- static int __add_preferred_console(char *name, int idx, char *options,
- char *brl_options)
- {
- @@ -2303,11 +2392,16 @@ static void console_cont_flush(char *text, size_t size)
- goto out;
-
- len = cont_print_text(text, size);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- + call_console_drivers(cont.level, NULL, 0, text, len);
- +#else
- raw_spin_unlock(&logbuf_lock);
- stop_critical_timings();
- call_console_drivers(cont.level, NULL, 0, text, len);
- start_critical_timings();
- local_irq_restore(flags);
- +#endif
- return;
- out:
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- @@ -2431,13 +2525,17 @@ void console_unlock(void)
- console_idx = log_next(console_idx);
- console_seq++;
- console_prev = msg->flags;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- + call_console_drivers(level, ext_text, ext_len, text, len);
- +#else
- raw_spin_unlock(&logbuf_lock);
-
- stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(level, ext_text, ext_len, text, len);
- start_critical_timings();
- local_irq_restore(flags);
- -
- +#endif
- if (do_cond_resched)
- cond_resched();
- }
- @@ -2489,6 +2587,11 @@ void console_unblank(void)
- {
- struct console *c;
-
- + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) {
- + if (in_irq() || in_nmi())
- + return;
- + }
- +
- /*
- * console_unblank can no longer be called in interrupt context unless
- * oops_in_progress is set to 1.
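The killswitch added at the top of this file is driven by a boot parameter, so the whole fallback can be enabled from the kernel command line; for example, assuming an early serial console was registered via the usual earlyprintk= parameter:

    earlyprintk=serial,ttyS0,115200 force_early_printk

Every printk() is then short-circuited into early_vprintk() and written synchronously to the early console -- useful when the regular console path, which now takes sleeping locks on RT, is suspected of losing output.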
- diff --git a/kernel/ptrace.c b/kernel/ptrace.c
- index f39a7be98fc1..583ce3aad891 100644
- --- a/kernel/ptrace.c
- +++ b/kernel/ptrace.c
- @@ -172,7 +172,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
-
- spin_lock_irq(&task->sighand->siglock);
- if (task_is_traced(task) && !__fatal_signal_pending(task)) {
- - task->state = __TASK_TRACED;
- + unsigned long flags;
- +
- + raw_spin_lock_irqsave(&task->pi_lock, flags);
- + if (task->state & __TASK_TRACED)
- + task->state = __TASK_TRACED;
- + else
- + task->saved_state = __TASK_TRACED;
- + raw_spin_unlock_irqrestore(&task->pi_lock, flags);
- ret = true;
- }
- spin_unlock_irq(&task->sighand->siglock);
- diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
- index bf08fee53dc7..eeb8ce4ad7b6 100644
- --- a/kernel/rcu/rcutorture.c
- +++ b/kernel/rcu/rcutorture.c
- @@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops = {
- .name = "rcu"
- };
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Definitions for rcu_bh torture testing.
- */
- @@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops = {
- .name = "rcu_bh"
- };
-
- +#else
- +static struct rcu_torture_ops rcu_bh_ops = {
- + .ttype = INVALID_RCU_FLAVOR,
- +};
- +#endif
- +
- /*
- * Don't even think about trying any of these in real life!!!
- * The names includes "busted", and they really means it!
- diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
- index d1a02877a42c..a7b11a29e03a 100644
- --- a/kernel/rcu/tree.c
- +++ b/kernel/rcu/tree.c
- @@ -55,6 +55,11 @@
- #include <linux/random.h>
- #include <linux/trace_events.h>
- #include <linux/suspend.h>
- +#include <linux/delay.h>
- +#include <linux/gfp.h>
- +#include <linux/oom.h>
- +#include <linux/smpboot.h>
- +#include "../time/tick-internal.h"
-
- #include "tree.h"
- #include "rcu.h"
- @@ -260,6 +265,19 @@ void rcu_sched_qs(void)
- this_cpu_ptr(&rcu_sched_data), true);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static void rcu_preempt_qs(void);
- +
- +void rcu_bh_qs(void)
- +{
- + unsigned long flags;
- +
- + /* rcu_preempt_qs() must be called with irqs disabled. */
- + local_irq_save(flags);
- + rcu_preempt_qs();
- + local_irq_restore(flags);
- +}
- +#else
- void rcu_bh_qs(void)
- {
- if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
- @@ -269,6 +287,7 @@ void rcu_bh_qs(void)
- __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
- }
- }
- +#endif
-
- static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-
- @@ -449,11 +468,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
- /*
- * Return the number of RCU BH batches started thus far for debug & stats.
- */
- +#ifndef CONFIG_PREEMPT_RT_FULL
- unsigned long rcu_batches_started_bh(void)
- {
- return rcu_bh_state.gpnum;
- }
- EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
- +#endif
-
- /*
- * Return the number of RCU batches completed thus far for debug & stats.
- @@ -473,6 +494,7 @@ unsigned long rcu_batches_completed_sched(void)
- }
- EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
- @@ -481,6 +503,7 @@ unsigned long rcu_batches_completed_bh(void)
- return rcu_bh_state.completed;
- }
- EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
- +#endif
-
- /*
- * Return the number of RCU expedited batches completed thus far for
- @@ -504,6 +527,7 @@ unsigned long rcu_exp_batches_completed_sched(void)
- }
- EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Force a quiescent state.
- */
- @@ -522,6 +546,13 @@ void rcu_bh_force_quiescent_state(void)
- }
- EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-
- +#else
- +void rcu_force_quiescent_state(void)
- +{
- +}
- +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
- +#endif
- +
- /*
- * Force a quiescent state for RCU-sched.
- */
- @@ -572,9 +603,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
- case RCU_FLAVOR:
- rsp = rcu_state_p;
- break;
- +#ifndef CONFIG_PREEMPT_RT_FULL
- case RCU_BH_FLAVOR:
- rsp = &rcu_bh_state;
- break;
- +#endif
- case RCU_SCHED_FLAVOR:
- rsp = &rcu_sched_state;
- break;
- @@ -3026,18 +3059,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
- /*
- * Do RCU core processing for the current CPU.
- */
- -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
- +static __latent_entropy void rcu_process_callbacks(void)
- {
- struct rcu_state *rsp;
-
- if (cpu_is_offline(smp_processor_id()))
- return;
- - trace_rcu_utilization(TPS("Start RCU core"));
- for_each_rcu_flavor(rsp)
- __rcu_process_callbacks(rsp);
- - trace_rcu_utilization(TPS("End RCU core"));
- }
-
- +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
- /*
- * Schedule RCU callback invocation. If the specified type of RCU
- * does not support RCU priority boosting, just do a direct call,
- @@ -3049,19 +3081,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
- {
- if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
- return;
- - if (likely(!rsp->boost)) {
- - rcu_do_batch(rsp, rdp);
- - return;
- - }
- - invoke_rcu_callbacks_kthread();
- + rcu_do_batch(rsp, rdp);
- +}
- +
- +static void rcu_wake_cond(struct task_struct *t, int status)
- +{
- + /*
- + * If the thread is yielding, only wake it when this
- + * is invoked from idle
- + */
- + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
- + wake_up_process(t);
- }
-
- +/*
- + * Wake up this CPU's rcuc kthread to do RCU core processing.
- + */
- static void invoke_rcu_core(void)
- {
- - if (cpu_online(smp_processor_id()))
- - raise_softirq(RCU_SOFTIRQ);
- + unsigned long flags;
- + struct task_struct *t;
- +
- + if (!cpu_online(smp_processor_id()))
- + return;
- + local_irq_save(flags);
- + __this_cpu_write(rcu_cpu_has_work, 1);
- + t = __this_cpu_read(rcu_cpu_kthread_task);
- + if (t != NULL && current != t)
- + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
- + local_irq_restore(flags);
- +}
- +
- +static void rcu_cpu_kthread_park(unsigned int cpu)
- +{
- + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
- +}
- +
- +static int rcu_cpu_kthread_should_run(unsigned int cpu)
- +{
- + return __this_cpu_read(rcu_cpu_has_work);
- }
-
- +/*
- + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- + * RCU softirq used in flavors and configurations of RCU that do not
- + * support RCU priority boosting.
- + */
- +static void rcu_cpu_kthread(unsigned int cpu)
- +{
- + unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
- + char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
- + int spincnt;
- +
- + for (spincnt = 0; spincnt < 10; spincnt++) {
- + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
- + local_bh_disable();
- + *statusp = RCU_KTHREAD_RUNNING;
- + this_cpu_inc(rcu_cpu_kthread_loops);
- + local_irq_disable();
- + work = *workp;
- + *workp = 0;
- + local_irq_enable();
- + if (work)
- + rcu_process_callbacks();
- + local_bh_enable();
- + if (*workp == 0) {
- + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
- + *statusp = RCU_KTHREAD_WAITING;
- + return;
- + }
- + }
- + *statusp = RCU_KTHREAD_YIELDING;
- + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
- + schedule_timeout_interruptible(2);
- + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
- + *statusp = RCU_KTHREAD_WAITING;
- +}
- +
- +static struct smp_hotplug_thread rcu_cpu_thread_spec = {
- + .store = &rcu_cpu_kthread_task,
- + .thread_should_run = rcu_cpu_kthread_should_run,
- + .thread_fn = rcu_cpu_kthread,
- + .thread_comm = "rcuc/%u",
- + .setup = rcu_cpu_kthread_setup,
- + .park = rcu_cpu_kthread_park,
- +};
- +
- +/*
- + * Spawn per-CPU RCU core processing kthreads.
- + */
- +static int __init rcu_spawn_core_kthreads(void)
- +{
- + int cpu;
- +
- + for_each_possible_cpu(cpu)
- + per_cpu(rcu_cpu_has_work, cpu) = 0;
- + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- + return 0;
- +}
- +early_initcall(rcu_spawn_core_kthreads);
- +
- /*
- * Handle any core-RCU processing required by a call_rcu() invocation.
- */
- @@ -3205,6 +3324,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
- }
- EXPORT_SYMBOL_GPL(call_rcu_sched);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Queue an RCU callback for invocation after a quicker grace period.
- */
- @@ -3213,6 +3333,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
- __call_rcu(head, func, &rcu_bh_state, -1, 0);
- }
- EXPORT_SYMBOL_GPL(call_rcu_bh);
- +#endif
-
- /*
- * Queue an RCU callback for lazy invocation after a grace period.
- @@ -3304,6 +3425,7 @@ void synchronize_sched(void)
- }
- EXPORT_SYMBOL_GPL(synchronize_sched);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- @@ -3330,6 +3452,7 @@ void synchronize_rcu_bh(void)
- wait_rcu_gp(call_rcu_bh);
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
- +#endif
-
- /**
- * get_state_synchronize_rcu - Snapshot current RCU state
- @@ -3708,6 +3831,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
- mutex_unlock(&rsp->barrier_mutex);
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
- @@ -3716,6 +3840,7 @@ void rcu_barrier_bh(void)
- _rcu_barrier(&rcu_bh_state);
- }
- EXPORT_SYMBOL_GPL(rcu_barrier_bh);
- +#endif
-
- /**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- @@ -4237,12 +4362,13 @@ void __init rcu_init(void)
-
- rcu_bootup_announce();
- rcu_init_geometry();
- +#ifndef CONFIG_PREEMPT_RT_FULL
- rcu_init_one(&rcu_bh_state);
- +#endif
- rcu_init_one(&rcu_sched_state);
- if (dump_tree)
- rcu_dump_rcu_node_tree(&rcu_sched_state);
- __rcu_init_preempt();
- - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-
- /*
- * We don't need protection against CPU-hotplug here because
- diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
- index e99a5234d9ed..958ac107062c 100644
- --- a/kernel/rcu/tree.h
- +++ b/kernel/rcu/tree.h
- @@ -588,18 +588,18 @@ extern struct list_head rcu_struct_flavors;
- */
- extern struct rcu_state rcu_sched_state;
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- extern struct rcu_state rcu_bh_state;
- +#endif
-
- #ifdef CONFIG_PREEMPT_RCU
- extern struct rcu_state rcu_preempt_state;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
-
- -#ifdef CONFIG_RCU_BOOST
- DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
- DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- DECLARE_PER_CPU(char, rcu_cpu_has_work);
- -#endif /* #ifdef CONFIG_RCU_BOOST */
-
- #ifndef RCU_TREE_NONCORE
-
- @@ -619,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
- static void __init __rcu_init_preempt(void);
- static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
- static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
- -static void invoke_rcu_callbacks_kthread(void);
- static bool rcu_is_callbacks_kthread(void);
- +static void rcu_cpu_kthread_setup(unsigned int cpu);
- #ifdef CONFIG_RCU_BOOST
- -static void rcu_preempt_do_callbacks(void);
- static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp);
- #endif /* #ifdef CONFIG_RCU_BOOST */
- diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
- index e3944c4b072d..be12d1aac840 100644
- --- a/kernel/rcu/tree_plugin.h
- +++ b/kernel/rcu/tree_plugin.h
- @@ -24,25 +24,10 @@
- * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
- */
-
- -#include <linux/delay.h>
- -#include <linux/gfp.h>
- -#include <linux/oom.h>
- -#include <linux/smpboot.h>
- -#include "../time/tick-internal.h"
- -
- #ifdef CONFIG_RCU_BOOST
-
- #include "../locking/rtmutex_common.h"
-
- -/*
- - * Control variables for per-CPU and per-rcu_node kthreads. These
- - * handle all flavors of RCU.
- - */
- -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
- -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- -DEFINE_PER_CPU(char, rcu_cpu_has_work);
- -
- #else /* #ifdef CONFIG_RCU_BOOST */
-
- /*
- @@ -55,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
- #endif /* #else #ifdef CONFIG_RCU_BOOST */
-
- +/*
- + * Control variables for per-CPU and per-rcu_node kthreads. These
- + * handle all flavors of RCU.
- + */
- +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- +DEFINE_PER_CPU(char, rcu_cpu_has_work);
- +
- #ifdef CONFIG_RCU_NOCB_CPU
- static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
- static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
- @@ -426,7 +419,7 @@ void rcu_read_unlock_special(struct task_struct *t)
- }
-
- /* Hardware IRQ handlers cannot block, complain if they get here. */
- - if (in_irq() || in_serving_softirq()) {
- + if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
- lockdep_rcu_suspicious(__FILE__, __LINE__,
- "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
- pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
- @@ -632,15 +625,6 @@ static void rcu_preempt_check_callbacks(void)
- t->rcu_read_unlock_special.b.need_qs = true;
- }
-
- -#ifdef CONFIG_RCU_BOOST
- -
- -static void rcu_preempt_do_callbacks(void)
- -{
- - rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
- -}
- -
- -#endif /* #ifdef CONFIG_RCU_BOOST */
- -
- /*
- * Queue a preemptible-RCU callback for invocation after a grace period.
- */
- @@ -829,6 +813,19 @@ void exit_rcu(void)
-
- #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
- +/*
- + * If boosting, set rcuc kthreads to realtime priority.
- + */
- +static void rcu_cpu_kthread_setup(unsigned int cpu)
- +{
- +#ifdef CONFIG_RCU_BOOST
- + struct sched_param sp;
- +
- + sp.sched_priority = kthread_prio;
- + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
- +#endif /* #ifdef CONFIG_RCU_BOOST */
- +}
- +
- #ifdef CONFIG_RCU_BOOST
-
- #include "../locking/rtmutex_common.h"
- @@ -860,16 +857,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
-
- #endif /* #else #ifdef CONFIG_RCU_TRACE */
-
- -static void rcu_wake_cond(struct task_struct *t, int status)
- -{
- - /*
- - * If the thread is yielding, only wake it when this
- - * is invoked from idle
- - */
- - if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
- - wake_up_process(t);
- -}
- -
- /*
- * Carry out RCU priority boosting on the task indicated by ->exp_tasks
- * or ->boost_tasks, advancing the pointer to the next task in the
- @@ -1012,23 +999,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
- }
- }
-
- -/*
- - * Wake up the per-CPU kthread to invoke RCU callbacks.
- - */
- -static void invoke_rcu_callbacks_kthread(void)
- -{
- - unsigned long flags;
- -
- - local_irq_save(flags);
- - __this_cpu_write(rcu_cpu_has_work, 1);
- - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
- - current != __this_cpu_read(rcu_cpu_kthread_task)) {
- - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
- - __this_cpu_read(rcu_cpu_kthread_status));
- - }
- - local_irq_restore(flags);
- -}
- -
- /*
- * Is the current CPU running the RCU-callbacks kthread?
- * Caller must have preemption disabled.
- @@ -1083,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- return 0;
- }
-
- -static void rcu_kthread_do_work(void)
- -{
- - rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
- - rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
- - rcu_preempt_do_callbacks();
- -}
- -
- -static void rcu_cpu_kthread_setup(unsigned int cpu)
- -{
- - struct sched_param sp;
- -
- - sp.sched_priority = kthread_prio;
- - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
- -}
- -
- -static void rcu_cpu_kthread_park(unsigned int cpu)
- -{
- - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
- -}
- -
- -static int rcu_cpu_kthread_should_run(unsigned int cpu)
- -{
- - return __this_cpu_read(rcu_cpu_has_work);
- -}
- -
- -/*
- - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- - * RCU softirq used in flavors and configurations of RCU that do not
- - * support RCU priority boosting.
- - */
- -static void rcu_cpu_kthread(unsigned int cpu)
- -{
- - unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
- - char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
- - int spincnt;
- -
- - for (spincnt = 0; spincnt < 10; spincnt++) {
- - trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
- - local_bh_disable();
- - *statusp = RCU_KTHREAD_RUNNING;
- - this_cpu_inc(rcu_cpu_kthread_loops);
- - local_irq_disable();
- - work = *workp;
- - *workp = 0;
- - local_irq_enable();
- - if (work)
- - rcu_kthread_do_work();
- - local_bh_enable();
- - if (*workp == 0) {
- - trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
- - *statusp = RCU_KTHREAD_WAITING;
- - return;
- - }
- - }
- - *statusp = RCU_KTHREAD_YIELDING;
- - trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
- - schedule_timeout_interruptible(2);
- - trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
- - *statusp = RCU_KTHREAD_WAITING;
- -}
- -
- /*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- @@ -1174,26 +1083,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
- free_cpumask_var(cm);
- }
-
- -static struct smp_hotplug_thread rcu_cpu_thread_spec = {
- - .store = &rcu_cpu_kthread_task,
- - .thread_should_run = rcu_cpu_kthread_should_run,
- - .thread_fn = rcu_cpu_kthread,
- - .thread_comm = "rcuc/%u",
- - .setup = rcu_cpu_kthread_setup,
- - .park = rcu_cpu_kthread_park,
- -};
- -
- /*
- * Spawn boost kthreads -- called as soon as the scheduler is running.
- */
- static void __init rcu_spawn_boost_kthreads(void)
- {
- struct rcu_node *rnp;
- - int cpu;
- -
- - for_each_possible_cpu(cpu)
- - per_cpu(rcu_cpu_has_work, cpu) = 0;
- - BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- rcu_for_each_leaf_node(rcu_state_p, rnp)
- (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
- }
- @@ -1216,11 +1111,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- }
-
- -static void invoke_rcu_callbacks_kthread(void)
- -{
- - WARN_ON_ONCE(1);
- -}
- -
- static bool rcu_is_callbacks_kthread(void)
- {
- return false;
- @@ -1244,7 +1134,7 @@ static void rcu_prepare_kthreads(int cpu)
-
- #endif /* #else #ifdef CONFIG_RCU_BOOST */
-
- -#if !defined(CONFIG_RCU_FAST_NO_HZ)
- +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
-
- /*
- * Check to see if any future RCU-related work will need to be done
- @@ -1261,7 +1151,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
- return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
- ? 0 : rcu_cpu_has_callbacks(NULL);
- }
- +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
-
- +#if !defined(CONFIG_RCU_FAST_NO_HZ)
- /*
- * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
- * after it.
- @@ -1357,6 +1249,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
- return cbs_ready;
- }
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- /*
- * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
- * to invoke. If the CPU has callbacks, try to advance them. Tell the
- @@ -1402,6 +1296,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
- *nextevt = basemono + dj * TICK_NSEC;
- return 0;
- }
- +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
-
- /*
- * Prepare a CPU for idle from an RCU perspective. The first major task
- diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
- index 4f6db7e6a117..ee02e1e1b3e5 100644
- --- a/kernel/rcu/update.c
- +++ b/kernel/rcu/update.c
- @@ -62,7 +62,7 @@
- #ifndef CONFIG_TINY_RCU
- module_param(rcu_expedited, int, 0);
- module_param(rcu_normal, int, 0);
- -static int rcu_normal_after_boot;
- +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
- module_param(rcu_normal_after_boot, int, 0);
- #endif /* #ifndef CONFIG_TINY_RCU */
-
- @@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void)
- }
- EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
-
- -static atomic_t rcu_expedited_nesting =
- - ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
- +static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
-
- /*
- * Should normal grace-period primitives be expedited? Intended for
- @@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
- */
- void rcu_end_inkernel_boot(void)
- {
- - if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
- - rcu_unexpedite_gp();
- + rcu_unexpedite_gp();
- if (rcu_normal_after_boot)
- WRITE_ONCE(rcu_normal, 1);
- }
- @@ -298,6 +296,7 @@ int rcu_read_lock_held(void)
- }
- EXPORT_SYMBOL_GPL(rcu_read_lock_held);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
- *
- @@ -324,6 +323,7 @@ int rcu_read_lock_bh_held(void)
- return in_softirq() || irqs_disabled();
- }
- EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
- +#endif
-
- #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
- diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
- index 5e59b832ae2b..7337a7f60e3f 100644
- --- a/kernel/sched/Makefile
- +++ b/kernel/sched/Makefile
- @@ -17,7 +17,7 @@ endif
-
- obj-y += core.o loadavg.o clock.o cputime.o
- obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
- -obj-y += wait.o swait.o completion.o idle.o
- +obj-y += wait.o swait.o swork.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
- obj-$(CONFIG_SCHEDSTATS) += stats.o
- diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
- index 8d0f35debf35..b62cf6400fe0 100644
- --- a/kernel/sched/completion.c
- +++ b/kernel/sched/completion.c
- @@ -30,10 +30,10 @@ void complete(struct completion *x)
- {
- unsigned long flags;
-
- - spin_lock_irqsave(&x->wait.lock, flags);
- + raw_spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- - __wake_up_locked(&x->wait, TASK_NORMAL, 1);
- - spin_unlock_irqrestore(&x->wait.lock, flags);
- + swake_up_locked(&x->wait);
- + raw_spin_unlock_irqrestore(&x->wait.lock, flags);
- }
- EXPORT_SYMBOL(complete);
-
- @@ -50,10 +50,10 @@ void complete_all(struct completion *x)
- {
- unsigned long flags;
-
- - spin_lock_irqsave(&x->wait.lock, flags);
- + raw_spin_lock_irqsave(&x->wait.lock, flags);
- x->done += UINT_MAX/2;
- - __wake_up_locked(&x->wait, TASK_NORMAL, 0);
- - spin_unlock_irqrestore(&x->wait.lock, flags);
- + swake_up_all_locked(&x->wait);
- + raw_spin_unlock_irqrestore(&x->wait.lock, flags);
- }
- EXPORT_SYMBOL(complete_all);
-
- @@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
- long (*action)(long), long timeout, int state)
- {
- if (!x->done) {
- - DECLARE_WAITQUEUE(wait, current);
- + DECLARE_SWAITQUEUE(wait);
-
- - __add_wait_queue_tail_exclusive(&x->wait, &wait);
- + __prepare_to_swait(&x->wait, &wait);
- do {
- if (signal_pending_state(state, current)) {
- timeout = -ERESTARTSYS;
- break;
- }
- __set_current_state(state);
- - spin_unlock_irq(&x->wait.lock);
- + raw_spin_unlock_irq(&x->wait.lock);
- timeout = action(timeout);
- - spin_lock_irq(&x->wait.lock);
- + raw_spin_lock_irq(&x->wait.lock);
- } while (!x->done && timeout);
- - __remove_wait_queue(&x->wait, &wait);
- + __finish_swait(&x->wait, &wait);
- if (!x->done)
- return timeout;
- }
- @@ -89,9 +89,9 @@ __wait_for_common(struct completion *x,
- {
- might_sleep();
-
- - spin_lock_irq(&x->wait.lock);
- + raw_spin_lock_irq(&x->wait.lock);
- timeout = do_wait_for_common(x, action, timeout, state);
- - spin_unlock_irq(&x->wait.lock);
- + raw_spin_unlock_irq(&x->wait.lock);
- return timeout;
- }
-
- @@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x)
- if (!READ_ONCE(x->done))
- return 0;
-
- - spin_lock_irqsave(&x->wait.lock, flags);
- + raw_spin_lock_irqsave(&x->wait.lock, flags);
- if (!x->done)
- ret = 0;
- else
- x->done--;
- - spin_unlock_irqrestore(&x->wait.lock, flags);
- + raw_spin_unlock_irqrestore(&x->wait.lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(try_wait_for_completion);
- @@ -311,7 +311,7 @@ bool completion_done(struct completion *x)
- * after it's acquired the lock.
- */
- smp_rmb();
- - spin_unlock_wait(&x->wait.lock);
- + raw_spin_unlock_wait(&x->wait.lock);
- return true;
- }
- EXPORT_SYMBOL(completion_done);
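The conversion above swaps the completion's wait_queue_head for a simple wait queue (swait) protected by a raw spinlock, so complete() remains usable from hard-irq context on RT. The public API is unchanged; a minimal usage sketch (hypothetical module code, not part of the patch):

    #include <linux/completion.h>
    #include <linux/kthread.h>

    static DECLARE_COMPLETION(setup_done);

    static int setup_thread(void *arg)
    {
            /* ... perform setup ... */
            complete(&setup_done);          /* now swake_up_locked() internally */
            return 0;
    }

    static int __init demo_init(void)
    {
            kthread_run(setup_thread, NULL, "demo-setup");
            wait_for_completion(&setup_done);       /* sleeps on the swait queue */
            return 0;
    }

Note the WARN in swake_up_all_locked() further down (kernel/sched/swait.c): complete_all() with many waiters is frowned upon on RT because all the wakeups happen under a raw spinlock.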
- diff --git a/kernel/sched/core.c b/kernel/sched/core.c
- index e5066955cc3a..ed1ebcc2ff3d 100644
- --- a/kernel/sched/core.c
- +++ b/kernel/sched/core.c
- @@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features =
- * Number of tasks to iterate in a single balance run.
- * Limited because this is done with IRQs disabled.
- */
- +#ifndef CONFIG_PREEMPT_RT_FULL
- const_debug unsigned int sysctl_sched_nr_migrate = 32;
- +#else
- +const_debug unsigned int sysctl_sched_nr_migrate = 8;
- +#endif
-
- /*
- * period over which we average the RT time consumption, measured
- @@ -345,6 +349,7 @@ static void init_rq_hrtick(struct rq *rq)
-
- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rq->hrtick_timer.function = hrtick;
- + rq->hrtick_timer.irqsafe = 1;
- }
- #else /* CONFIG_SCHED_HRTICK */
- static inline void hrtick_clear(struct rq *rq)
- @@ -425,9 +430,15 @@ static bool set_nr_if_polling(struct task_struct *p)
- #endif
- #endif
-
- -void wake_q_add(struct wake_q_head *head, struct task_struct *task)
- +void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
- + bool sleeper)
- {
- - struct wake_q_node *node = &task->wake_q;
- + struct wake_q_node *node;
- +
- + if (sleeper)
- + node = &task->wake_q_sleeper;
- + else
- + node = &task->wake_q;
-
- /*
- * Atomically grab the task, if ->wake_q is !nil already it means
- @@ -449,24 +460,33 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
- head->lastp = &node->next;
- }
-
- -void wake_up_q(struct wake_q_head *head)
- +void __wake_up_q(struct wake_q_head *head, bool sleeper)
- {
- struct wake_q_node *node = head->first;
-
- while (node != WAKE_Q_TAIL) {
- struct task_struct *task;
-
- - task = container_of(node, struct task_struct, wake_q);
- + if (sleeper)
- + task = container_of(node, struct task_struct, wake_q_sleeper);
- + else
- + task = container_of(node, struct task_struct, wake_q);
- BUG_ON(!task);
- /* task can safely be re-inserted now */
- node = node->next;
- - task->wake_q.next = NULL;
- + if (sleeper)
- + task->wake_q_sleeper.next = NULL;
- + else
- + task->wake_q.next = NULL;
-
- /*
- * wake_up_process() implies a wmb() to pair with the queueing
- * in wake_q_add() so as not to miss wakeups.
- */
- - wake_up_process(task);
- + if (sleeper)
- + wake_up_lock_sleeper(task);
- + else
- + wake_up_process(task);
- put_task_struct(task);
- }
- }
- @@ -502,6 +522,38 @@ void resched_curr(struct rq *rq)
- trace_sched_wake_idle_without_ipi(cpu);
- }
-
- +#ifdef CONFIG_PREEMPT_LAZY
- +void resched_curr_lazy(struct rq *rq)
- +{
- + struct task_struct *curr = rq->curr;
- + int cpu;
- +
- + if (!sched_feat(PREEMPT_LAZY)) {
- + resched_curr(rq);
- + return;
- + }
- +
- + lockdep_assert_held(&rq->lock);
- +
- + if (test_tsk_need_resched(curr))
- + return;
- +
- + if (test_tsk_need_resched_lazy(curr))
- + return;
- +
- + set_tsk_need_resched_lazy(curr);
- +
- + cpu = cpu_of(rq);
- + if (cpu == smp_processor_id())
- + return;
- +
- + /* NEED_RESCHED_LAZY must be visible before we test polling */
- + smp_mb();
- + if (!tsk_is_polling(curr))
- + smp_send_reschedule(cpu);
- +}
- +#endif
- +
- void resched_cpu(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- @@ -524,11 +576,14 @@ void resched_cpu(int cpu)
- */
- int get_nohz_timer_target(void)
- {
- - int i, cpu = smp_processor_id();
- + int i, cpu;
- struct sched_domain *sd;
-
- + preempt_disable_rt();
- + cpu = smp_processor_id();
- +
- if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
- - return cpu;
- + goto preempt_en_rt;
-
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- @@ -547,6 +602,8 @@ int get_nohz_timer_target(void)
- cpu = housekeeping_any_cpu();
- unlock:
- rcu_read_unlock();
- +preempt_en_rt:
- + preempt_enable_rt();
- return cpu;
- }
- /*
- @@ -1092,7 +1149,8 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
- p->nr_cpus_allowed = cpumask_weight(new_mask);
- }
-
- -void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- +static void __do_set_cpus_allowed_tail(struct task_struct *p,
- + const struct cpumask *new_mask)
- {
- struct rq *rq = task_rq(p);
- bool queued, running;
- @@ -1121,6 +1179,98 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- set_curr_task(rq, p);
- }
-
- +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- +{
- + if (__migrate_disabled(p)) {
- + lockdep_assert_held(&p->pi_lock);
- +
- + cpumask_copy(&p->cpus_allowed, new_mask);
- +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
- + p->migrate_disable_update = 1;
- +#endif
- + return;
- + }
- + __do_set_cpus_allowed_tail(p, new_mask);
- +}
- +
- +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
- +static DEFINE_MUTEX(sched_down_mutex);
- +static cpumask_t sched_down_cpumask;
- +
- +void tell_sched_cpu_down_begin(int cpu)
- +{
- + mutex_lock(&sched_down_mutex);
- + cpumask_set_cpu(cpu, &sched_down_cpumask);
- + mutex_unlock(&sched_down_mutex);
- +}
- +
- +void tell_sched_cpu_down_done(int cpu)
- +{
- + mutex_lock(&sched_down_mutex);
- + cpumask_clear_cpu(cpu, &sched_down_cpumask);
- + mutex_unlock(&sched_down_mutex);
- +}
- +
- +/**
- + * migrate_me - try to move the current task off this cpu
- + *
- + * Used by the pin_current_cpu() code to try to get tasks
- + * to move off the current CPU as it is going down.
- + * It will only move the task if the task isn't pinned to
- + * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
- + * and the task is in a RUNNING state; otherwise moving the
- + * task would wake it up (change its state to running) when
- + * the task did not expect it.
- + *
- + * Returns 1 if it succeeded in moving the current task
- + * 0 otherwise.
- + */
- +int migrate_me(void)
- +{
- + struct task_struct *p = current;
- + struct migration_arg arg;
- + struct cpumask *cpumask;
- + struct cpumask *mask;
- + unsigned int dest_cpu;
- + struct rq_flags rf;
- + struct rq *rq;
- +
- + /*
- + * We cannot migrate tasks bound to a CPU or tasks that are not
- + * running; moving a non-running task would wake it up.
- + */
- + if (p->flags & PF_NO_SETAFFINITY || p->state)
- + return 0;
- +
- + mutex_lock(&sched_down_mutex);
- + rq = task_rq_lock(p, &rf);
- +
- + cpumask = this_cpu_ptr(&sched_cpumasks);
- + mask = &p->cpus_allowed;
- +
- + cpumask_andnot(cpumask, mask, &sched_down_cpumask);
- +
- + if (!cpumask_weight(cpumask)) {
- + /* It's only on this CPU? */
- + task_rq_unlock(rq, p, &rf);
- + mutex_unlock(&sched_down_mutex);
- + return 0;
- + }
- +
- + dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
- +
- + arg.task = p;
- + arg.dest_cpu = dest_cpu;
- +
- + task_rq_unlock(rq, p, &rf);
- +
- + stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
- + tlb_migrate_finish(p->mm);
- + mutex_unlock(&sched_down_mutex);
- +
- + return 1;
- +}
- +
- /*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- @@ -1179,7 +1329,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
- }
-
- /* Can the task run on the task's current CPU? If so, we're done */
- - if (cpumask_test_cpu(task_cpu(p), new_mask))
- + if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
- goto out;
-
- dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
- @@ -1366,6 +1516,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
- return ret;
- }
-
- +static bool check_task_state(struct task_struct *p, long match_state)
- +{
- + bool match = false;
- +
- + raw_spin_lock_irq(&p->pi_lock);
- + if (p->state == match_state || p->saved_state == match_state)
- + match = true;
- + raw_spin_unlock_irq(&p->pi_lock);
- +
- + return match;
- +}
- +
- /*
- * wait_task_inactive - wait for a thread to unschedule.
- *
- @@ -1410,7 +1572,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
- * is actually now running somewhere else!
- */
- while (task_running(rq, p)) {
- - if (match_state && unlikely(p->state != match_state))
- + if (match_state && !check_task_state(p, match_state))
- return 0;
- cpu_relax();
- }
- @@ -1425,7 +1587,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
- running = task_running(rq, p);
- queued = task_on_rq_queued(p);
- ncsw = 0;
- - if (!match_state || p->state == match_state)
- + if (!match_state || p->state == match_state ||
- + p->saved_state == match_state)
- ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- task_rq_unlock(rq, p, &rf);
-
- @@ -1680,10 +1843,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
- {
- activate_task(rq, p, en_flags);
- p->on_rq = TASK_ON_RQ_QUEUED;
- -
- - /* if a worker is waking up, notify workqueue */
- - if (p->flags & PF_WQ_WORKER)
- - wq_worker_waking_up(p, cpu_of(rq));
- }
-
- /*
- @@ -2018,8 +2177,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- */
- smp_mb__before_spinlock();
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- - if (!(p->state & state))
- + if (!(p->state & state)) {
- + /*
- + * The task might be running due to a spinlock sleeper
- + * wakeup. Check the saved state and set it to running
- + * if the wakeup condition is true.
- + */
- + if (!(wake_flags & WF_LOCK_SLEEPER)) {
- + if (p->saved_state & state) {
- + p->saved_state = TASK_RUNNING;
- + success = 1;
- + }
- + }
- goto out;
- + }
- +
- + /*
- + * If this is a regular wakeup, then we can unconditionally
- + * clear the saved state of a "lock sleeper".
- + */
- + if (!(wake_flags & WF_LOCK_SLEEPER))
- + p->saved_state = TASK_RUNNING;
-
- trace_sched_waking(p);
-
- @@ -2101,53 +2279,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- return success;
- }
-
- -/**
- - * try_to_wake_up_local - try to wake up a local task with rq lock held
- - * @p: the thread to be awakened
- - * @cookie: context's cookie for pinning
- - *
- - * Put @p on the run-queue if it's not already there. The caller must
- - * ensure that this_rq() is locked, @p is bound to this_rq() and not
- - * the current task.
- - */
- -static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
- -{
- - struct rq *rq = task_rq(p);
- -
- - if (WARN_ON_ONCE(rq != this_rq()) ||
- - WARN_ON_ONCE(p == current))
- - return;
- -
- - lockdep_assert_held(&rq->lock);
- -
- - if (!raw_spin_trylock(&p->pi_lock)) {
- - /*
- - * This is OK, because current is on_cpu, which avoids it being
- - * picked for load-balance and preemption/IRQs are still
- - * disabled avoiding further scheduler activity on it and we've
- - * not yet picked a replacement task.
- - */
- - lockdep_unpin_lock(&rq->lock, cookie);
- - raw_spin_unlock(&rq->lock);
- - raw_spin_lock(&p->pi_lock);
- - raw_spin_lock(&rq->lock);
- - lockdep_repin_lock(&rq->lock, cookie);
- - }
- -
- - if (!(p->state & TASK_NORMAL))
- - goto out;
- -
- - trace_sched_waking(p);
- -
- - if (!task_on_rq_queued(p))
- - ttwu_activate(rq, p, ENQUEUE_WAKEUP);
- -
- - ttwu_do_wakeup(rq, p, 0, cookie);
- - ttwu_stat(p, smp_processor_id(), 0);
- -out:
- - raw_spin_unlock(&p->pi_lock);
- -}
- -
- /**
- * wake_up_process - Wake up a specific process
- * @p: The process to be woken up.
- @@ -2166,6 +2297,18 @@ int wake_up_process(struct task_struct *p)
- }
- EXPORT_SYMBOL(wake_up_process);
-
- +/**
- + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
- + * @p: The process to be woken up.
- + *
- + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
- + * the nature of the wakeup.
- + */
- +int wake_up_lock_sleeper(struct task_struct *p)
- +{
- + return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
- +}
- +
- int wake_up_state(struct task_struct *p, unsigned int state)
- {
- return try_to_wake_up(p, state, 0);
- @@ -2442,6 +2585,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
- p->on_cpu = 0;
- #endif
- init_task_preempt_count(p);
- +#ifdef CONFIG_HAVE_PREEMPT_LAZY
- + task_thread_info(p)->preempt_lazy_count = 0;
- +#endif
- #ifdef CONFIG_SMP
- plist_node_init(&p->pushable_tasks, MAX_PRIO);
- RB_CLEAR_NODE(&p->pushable_dl_tasks);
- @@ -2770,21 +2916,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
- finish_arch_post_lock_switch();
-
- fire_sched_in_preempt_notifiers(current);
- + /*
- + * We use mmdrop_delayed() here so we don't have to do the
- + * full __mmdrop() when we are the last user.
- + */
- if (mm)
- - mmdrop(mm);
- + mmdrop_delayed(mm);
- if (unlikely(prev_state == TASK_DEAD)) {
- if (prev->sched_class->task_dead)
- prev->sched_class->task_dead(prev);
-
- - /*
- - * Remove function-return probe instances associated with this
- - * task and put them back on the free list.
- - */
- - kprobe_flush_task(prev);
- -
- - /* Task is done with its stack. */
- - put_task_stack(prev);
- -
- put_task_struct(prev);
- }
-
- @@ -3252,6 +3393,114 @@ static inline void schedule_debug(struct task_struct *prev)
- schedstat_inc(this_rq()->sched_count);
- }
-
- +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
- +
- +void migrate_disable(void)
- +{
- + struct task_struct *p = current;
- +
- + if (in_atomic() || irqs_disabled()) {
- +#ifdef CONFIG_SCHED_DEBUG
- + p->migrate_disable_atomic++;
- +#endif
- + return;
- + }
- +
- +#ifdef CONFIG_SCHED_DEBUG
- + if (unlikely(p->migrate_disable_atomic)) {
- + tracing_off();
- + WARN_ON_ONCE(1);
- + }
- +#endif
- +
- + if (p->migrate_disable) {
- + p->migrate_disable++;
- + return;
- + }
- +
- + preempt_disable();
- + preempt_lazy_disable();
- + pin_current_cpu();
- + p->migrate_disable = 1;
- + preempt_enable();
- +}
- +EXPORT_SYMBOL(migrate_disable);
- +
- +void migrate_enable(void)
- +{
- + struct task_struct *p = current;
- +
- + if (in_atomic() || irqs_disabled()) {
- +#ifdef CONFIG_SCHED_DEBUG
- + p->migrate_disable_atomic--;
- +#endif
- + return;
- + }
- +
- +#ifdef CONFIG_SCHED_DEBUG
- + if (unlikely(p->migrate_disable_atomic)) {
- + tracing_off();
- + WARN_ON_ONCE(1);
- + }
- +#endif
- + WARN_ON_ONCE(p->migrate_disable <= 0);
- +
- + if (p->migrate_disable > 1) {
- + p->migrate_disable--;
- + return;
- + }
- +
- + preempt_disable();
- + /*
- + * Clearing migrate_disable causes tsk_cpus_allowed to
- + * show the task's original cpu affinity.
- + */
- + p->migrate_disable = 0;
- +
- + if (p->migrate_disable_update) {
- + struct rq *rq;
- + struct rq_flags rf;
- +
- + rq = task_rq_lock(p, &rf);
- + update_rq_clock(rq);
- +
- + __do_set_cpus_allowed_tail(p, &p->cpus_allowed);
- + task_rq_unlock(rq, p, &rf);
- +
- + p->migrate_disable_update = 0;
- +
- + WARN_ON(smp_processor_id() != task_cpu(p));
- + if (!cpumask_test_cpu(task_cpu(p), &p->cpus_allowed)) {
- + const struct cpumask *cpu_valid_mask = cpu_active_mask;
- + struct migration_arg arg;
- + unsigned int dest_cpu;
- +
- + if (p->flags & PF_KTHREAD) {
- + /*
- + * Kernel threads are allowed on online && !active CPUs
- + */
- + cpu_valid_mask = cpu_online_mask;
- + }
- + dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_allowed);
- + arg.task = p;
- + arg.dest_cpu = dest_cpu;
- +
- + unpin_current_cpu();
- + preempt_lazy_enable();
- + preempt_enable();
- + stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
- + tlb_migrate_finish(p->mm);
- + return;
- + }
- + }
- +
- + unpin_current_cpu();
- + preempt_enable();
- + preempt_lazy_enable();
- +}
- +EXPORT_SYMBOL(migrate_enable);
- +#endif
- +
- /*
- * Pick up the highest-prio task:
- */
- @@ -3368,19 +3617,6 @@ static void __sched notrace __schedule(bool preempt)
- } else {
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
- -
- - /*
- - * If a worker went to sleep, notify and ask workqueue
- - * whether it wants to wake up a task to maintain
- - * concurrency.
- - */
- - if (prev->flags & PF_WQ_WORKER) {
- - struct task_struct *to_wakeup;
- -
- - to_wakeup = wq_worker_sleeping(prev);
- - if (to_wakeup)
- - try_to_wake_up_local(to_wakeup, cookie);
- - }
- }
- switch_count = &prev->nvcsw;
- }
- @@ -3390,6 +3626,7 @@ static void __sched notrace __schedule(bool preempt)
-
- next = pick_next_task(rq, prev, cookie);
- clear_tsk_need_resched(prev);
- + clear_tsk_need_resched_lazy(prev);
- clear_preempt_need_resched();
- rq->clock_skip_update = 0;
-
- @@ -3437,8 +3674,19 @@ void __noreturn do_task_dead(void)
-
- static inline void sched_submit_work(struct task_struct *tsk)
- {
- - if (!tsk->state || tsk_is_pi_blocked(tsk))
- + if (!tsk->state)
- return;
- + /*
- + * If a worker went to sleep, notify and ask workqueue whether
- + * it wants to wake up a task to maintain concurrency.
- + */
- + if (tsk->flags & PF_WQ_WORKER)
- + wq_worker_sleeping(tsk);
- +
- +
- + if (tsk_is_pi_blocked(tsk))
- + return;
- +
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
- @@ -3447,6 +3695,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
- blk_schedule_flush_plug(tsk);
- }
-
- +static void sched_update_worker(struct task_struct *tsk)
- +{
- + if (tsk->flags & PF_WQ_WORKER)
- + wq_worker_running(tsk);
- +}
- +
- asmlinkage __visible void __sched schedule(void)
- {
- struct task_struct *tsk = current;
- @@ -3457,6 +3711,7 @@ asmlinkage __visible void __sched schedule(void)
- __schedule(false);
- sched_preempt_enable_no_resched();
- } while (need_resched());
- + sched_update_worker(tsk);
- }
- EXPORT_SYMBOL(schedule);
-
- @@ -3520,6 +3775,30 @@ static void __sched notrace preempt_schedule_common(void)
- } while (need_resched());
- }
-
- +#ifdef CONFIG_PREEMPT_LAZY
- +/*
- + * If TIF_NEED_RESCHED is set we allow being scheduled away, since it is
- + * set by an RT task. Otherwise we try to avoid being scheduled out as
- + * long as the preempt_lazy_count counter is > 0.
- + */
- +static __always_inline int preemptible_lazy(void)
- +{
- + if (test_thread_flag(TIF_NEED_RESCHED))
- + return 1;
- + if (current_thread_info()->preempt_lazy_count)
- + return 0;
- + return 1;
- +}
- +
- +#else
- +
- +static inline int preemptible_lazy(void)
- +{
- + return 1;
- +}
- +
- +#endif
- +
- #ifdef CONFIG_PREEMPT
- /*
- * this is the entry point to schedule() from in-kernel preemption
- @@ -3534,7 +3813,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
- */
- if (likely(!preemptible()))
- return;
- -
- + if (!preemptible_lazy())
- + return;
- preempt_schedule_common();
- }
- NOKPROBE_SYMBOL(preempt_schedule);
- @@ -3561,6 +3841,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
- if (likely(!preemptible()))
- return;
-
- + if (!preemptible_lazy())
- + return;
- +
- do {
- /*
- * Because the function tracer can trace preempt_count_sub()
- @@ -3583,7 +3866,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
- * an infinite recursion.
- */
- prev_ctx = exception_enter();
- + /*
- + * The add/subtract must not be traced by the function
- + * tracer. But we still want to account for the
- + * preempt-off latency tracer. Since the _notrace versions
- + * of add/subtract skip the accounting for the latency tracer,
- + * we must force it manually.
- + */
- + start_critical_timings();
- __schedule(true);
- + stop_critical_timings();
- exception_exit(prev_ctx);
-
- preempt_latency_stop(1);
- @@ -3629,10 +3921,25 @@ EXPORT_SYMBOL(default_wake_function);
-
- #ifdef CONFIG_RT_MUTEXES
-
- +static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
- +{
- + if (pi_task)
- + prio = min(prio, pi_task->prio);
- +
- + return prio;
- +}
- +
- +static inline int rt_effective_prio(struct task_struct *p, int prio)
- +{
- + struct task_struct *pi_task = rt_mutex_get_top_task(p);
- +
- + return __rt_effective_prio(pi_task, prio);
- +}
- +
- /*
- * rt_mutex_setprio - set the current priority of a task
- - * @p: task
- - * @prio: prio value (kernel-internal form)
- + * @p: task to boost
- + * @pi_task: donor task
- *
- * This function changes the 'effective' priority of a task. It does
- * not touch ->normal_prio like __setscheduler().
- @@ -3640,16 +3947,40 @@ EXPORT_SYMBOL(default_wake_function);
- * Used by the rt_mutex code to implement priority inheritance
- * logic. Call site only calls if the priority of the task changed.
- */
- -void rt_mutex_setprio(struct task_struct *p, int prio)
- +void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
- {
- - int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
- + int prio, oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
- const struct sched_class *prev_class;
- struct rq_flags rf;
- struct rq *rq;
-
- - BUG_ON(prio > MAX_PRIO);
- + /* XXX used to be waiter->prio, not waiter->task->prio */
- + prio = __rt_effective_prio(pi_task, p->normal_prio);
- +
- + /*
- + * If nothing changed; bail early.
- + */
- + if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio))
- + return;
-
- rq = __task_rq_lock(p, &rf);
- + /*
- + * Set under pi_lock && rq->lock, such that the value can be used under
- + * either lock.
- + *
- + * Note that there is loads of trickiness to make this pointer cache
- + * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together
- + * to ensure a task is de-boosted (pi_task is set to NULL) before the
- + * task is allowed to run again (and can exit). This ensures the pointer
- + * points to a blocked task -- which guarantees the task is present.
- + */
- + p->pi_top_task = pi_task;
- +
- + /*
- + * For FIFO/RR we only need to set prio; if that matches, we're done.
- + */
- + if (prio == p->prio && !dl_prio(prio))
- + goto out_unlock;
-
- /*
- * Idle task boosting is a nono in general. There is one
- @@ -3669,7 +4000,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
- goto out_unlock;
- }
-
- - trace_sched_pi_setprio(p, prio);
- + trace_sched_pi_setprio(p, pi_task);
- oldprio = p->prio;
-
- if (oldprio == prio)
- @@ -3693,7 +4024,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
- * running task
- */
- if (dl_prio(prio)) {
- - struct task_struct *pi_task = rt_mutex_get_top_task(p);
- if (!dl_prio(p->normal_prio) ||
- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
- p->dl.dl_boosted = 1;
- @@ -3730,6 +4060,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
- balance_callback(rq);
- preempt_enable();
- }
- +#else
- +static inline int rt_effective_prio(struct task_struct *p, int prio)
- +{
- + return prio;
- +}
- #endif
-
- void set_user_nice(struct task_struct *p, long nice)
- @@ -3974,10 +4309,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
- * Keep a potential priority boosting if called from
- * sched_setscheduler().
- */
- + p->prio = normal_prio(p);
- if (keep_boost)
- - p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
- - else
- - p->prio = normal_prio(p);
- + p->prio = rt_effective_prio(p, p->prio);
-
- if (dl_prio(p->prio))
- p->sched_class = &dl_sched_class;
- @@ -4264,7 +4598,7 @@ static int __sched_setscheduler(struct task_struct *p,
- * the runqueue. This will be done when the task deboost
- * itself.
- */
- - new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
- + new_effective_prio = rt_effective_prio(p, newprio);
- if (new_effective_prio == oldprio)
- queue_flags &= ~DEQUEUE_MOVE;
- }
- @@ -4939,6 +5273,7 @@ int __cond_resched_lock(spinlock_t *lock)
- }
- EXPORT_SYMBOL(__cond_resched_lock);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- int __sched __cond_resched_softirq(void)
- {
- BUG_ON(!in_softirq());
- @@ -4952,6 +5287,7 @@ int __sched __cond_resched_softirq(void)
- return 0;
- }
- EXPORT_SYMBOL(__cond_resched_softirq);
- +#endif
-
- /**
- * yield - yield the current processor to other threads.
- @@ -5315,7 +5651,9 @@ void init_idle(struct task_struct *idle, int cpu)
-
- /* Set the preempt count _outside_ the spinlocks! */
- init_idle_preempt_count(idle, cpu);
- -
- +#ifdef CONFIG_HAVE_PREEMPT_LAZY
- + task_thread_info(idle)->preempt_lazy_count = 0;
- +#endif
- /*
- * The idle tasks have their own, simple scheduling class:
- */
- @@ -5458,6 +5796,8 @@ void sched_setnuma(struct task_struct *p, int nid)
- #endif /* CONFIG_NUMA_BALANCING */
-
- #ifdef CONFIG_HOTPLUG_CPU
- +static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
- +
- /*
- * Ensures that the idle task is using init_mm right before its cpu goes
- * offline.
- @@ -5472,7 +5812,12 @@ void idle_task_exit(void)
- switch_mm(mm, &init_mm, current);
- finish_arch_post_lock_switch();
- }
- - mmdrop(mm);
- + /*
- + * Defer the cleanup to a live CPU. On RT we can neither
- + * call mmdrop() nor mmdrop_delayed() from here.
- + */
- + per_cpu(idle_last_mm, smp_processor_id()) = mm;
- +
- }
-
- /*
- @@ -5881,6 +6226,7 @@ static int init_rootdomain(struct root_domain *rd)
- rd->rto_cpu = -1;
- raw_spin_lock_init(&rd->rto_lock);
- init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
- + rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ;
- #endif
-
- init_dl_bw(&rd->dl_bw);
- @@ -7439,6 +7785,10 @@ int sched_cpu_dying(unsigned int cpu)
- update_max_interval();
- nohz_balance_exit_idle(cpu);
- hrtick_clear(rq);
- + if (per_cpu(idle_last_mm, cpu)) {
- + mmdrop_delayed(per_cpu(idle_last_mm, cpu));
- + per_cpu(idle_last_mm, cpu) = NULL;
- + }
- return 0;
- }
- #endif
- @@ -7700,7 +8050,7 @@ void __init sched_init(void)
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- static inline int preempt_count_equals(int preempt_offset)
- {
- - int nested = preempt_count() + rcu_preempt_depth();
- + int nested = preempt_count() + sched_rcu_preempt_depth();
-
- return (nested == preempt_offset);
- }
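Much of the core.c diff above exists to support migrate_disable(): a task can pin itself to its current CPU while remaining fully preemptible, which is what RT's sleeping spinlocks rely on. A short usage sketch (the per-CPU variable is hypothetical):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned long, demo_count);

    static void demo_update(void)
    {
            migrate_disable();              /* pin to this CPU, stay preemptible */
            this_cpu_inc(demo_count);       /* per-CPU data is now stable */
            migrate_enable();               /* may apply a deferred affinity change */
    }

If set_cpus_allowed_ptr() runs while the task is pinned, do_set_cpus_allowed() only records the new mask and sets ->migrate_disable_update; migrate_enable() then performs the actual migration via stop_one_cpu(), as seen above.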
- diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
- index df5c32a0c6ed..c77fd444dc3c 100644
- --- a/kernel/sched/deadline.c
- +++ b/kernel/sched/deadline.c
- @@ -693,6 +693,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
-
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer->function = dl_task_timer;
- + timer->irqsafe = 1;
- }
-
- /*
- diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
- index fa178b62ea79..935224123441 100644
- --- a/kernel/sched/debug.c
- +++ b/kernel/sched/debug.c
- @@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
- P(rt_throttled);
- PN(rt_time);
- PN(rt_runtime);
- +#ifdef CONFIG_SMP
- + P(rt_nr_migratory);
- +#endif
-
- #undef PN
- #undef P
- @@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
- #endif
- P(policy);
- P(prio);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + P(migrate_disable);
- +#endif
- + P(nr_cpus_allowed);
- #undef PN_SCHEDSTAT
- #undef PN
- #undef __PN
- diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
- index 3d862f5b0331..c6db32c0c557 100644
- --- a/kernel/sched/fair.c
- +++ b/kernel/sched/fair.c
- @@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- ideal_runtime = sched_slice(cfs_rq, curr);
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime) {
- - resched_curr(rq_of(cfs_rq));
- + resched_curr_lazy(rq_of(cfs_rq));
- /*
- * The current task ran long enough, ensure it doesn't get
- * re-elected due to buddy favours.
- @@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- return;
-
- if (delta > ideal_runtime)
- - resched_curr(rq_of(cfs_rq));
- + resched_curr_lazy(rq_of(cfs_rq));
- }
-
- static void
- @@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
- * validating it and just reschedule.
- */
- if (queued) {
- - resched_curr(rq_of(cfs_rq));
- + resched_curr_lazy(rq_of(cfs_rq));
- return;
- }
- /*
- @@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
- * hierarchy can be throttled
- */
- if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- - resched_curr(rq_of(cfs_rq));
- + resched_curr_lazy(rq_of(cfs_rq));
- }
-
- static __always_inline
- @@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
-
- if (delta < 0) {
- if (rq->curr == p)
- - resched_curr(rq);
- + resched_curr_lazy(rq);
- return;
- }
- hrtick_start(rq, delta);
- @@ -5862,7 +5862,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
- return;
-
- preempt:
- - resched_curr(rq);
- + resched_curr_lazy(rq);
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
- @@ -8588,7 +8588,7 @@ static void task_fork_fair(struct task_struct *p)
- * 'current' within the tree based on its new key value.
- */
- swap(curr->vruntime, se->vruntime);
- - resched_curr(rq);
- + resched_curr_lazy(rq);
- }
-
- se->vruntime -= cfs_rq->min_vruntime;
- @@ -8612,7 +8612,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
- */
- if (rq->curr == p) {
- if (p->prio > oldprio)
- - resched_curr(rq);
- + resched_curr_lazy(rq);
- } else
- check_preempt_curr(rq, p, 0);
- }
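The fair.c hunks above convert every fair-class preemption request to resched_curr_lazy(), which sets TIF_NEED_RESCHED_LAZY instead of TIF_NEED_RESCHED when PREEMPT_LAZY is enabled. In-kernel preemption points honor only the real flag, so a SCHED_OTHER task is no longer preempted in the middle of kernel work merely because its timeslice expired; the lazy flag is acted upon at the latest on return to user space. A sketch of the arch exit-path check (assumption: simplified, the arch changes are not part of this section):

    /* exit-to-user work loop, simplified */
    if (test_thread_flag(TIF_NEED_RESCHED) ||
        test_thread_flag(TIF_NEED_RESCHED_LAZY))
            schedule();

RT tasks still go through the resched_curr() paths and therefore preempt immediately.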
- diff --git a/kernel/sched/features.h b/kernel/sched/features.h
- index 1b3c8189b286..36086f74e011 100644
- --- a/kernel/sched/features.h
- +++ b/kernel/sched/features.h
- @@ -45,11 +45,19 @@ SCHED_FEAT(LB_BIAS, true)
- */
- SCHED_FEAT(NONTASK_CAPACITY, true)
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +SCHED_FEAT(TTWU_QUEUE, false)
- +# ifdef CONFIG_PREEMPT_LAZY
- +SCHED_FEAT(PREEMPT_LAZY, true)
- +# endif
- +#else
- +
- /*
- * Queue remote wakeups on the target CPU and process them
- * using the scheduler IPI. Reduces rq->lock contention/bounces.
- */
- SCHED_FEAT(TTWU_QUEUE, true)
- +#endif
-
- /*
- * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
- diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
- index 7a360d6f6798..d361629c0f96 100644
- --- a/kernel/sched/rt.c
- +++ b/kernel/sched/rt.c
- @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
-
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- + rt_b->rt_period_timer.irqsafe = 1;
- rt_b->rt_period_timer.function = sched_rt_period_timer;
- }
-
- diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
- index cff985feb6e7..280c7d5a7657 100644
- --- a/kernel/sched/sched.h
- +++ b/kernel/sched/sched.h
- @@ -1162,6 +1162,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
- #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
- #define WF_FORK 0x02 /* child wakeup after fork */
- #define WF_MIGRATED 0x4 /* internal use, task got migrated */
- +#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
-
- /*
- * To aid in avoiding the subversion of "niceness" due to uneven distribution
- @@ -1345,6 +1346,15 @@ extern void init_sched_fair_class(void);
- extern void resched_curr(struct rq *rq);
- extern void resched_cpu(int cpu);
-
- +#ifdef CONFIG_PREEMPT_LAZY
- +extern void resched_curr_lazy(struct rq *rq);
- +#else
- +static inline void resched_curr_lazy(struct rq *rq)
- +{
- + resched_curr(rq);
- +}
- +#endif
- +
- extern struct rt_bandwidth def_rt_bandwidth;
- extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-
- diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
- index 82f0dff90030..ef027ff3250a 100644
- --- a/kernel/sched/swait.c
- +++ b/kernel/sched/swait.c
- @@ -1,5 +1,6 @@
- #include <linux/sched.h>
- #include <linux/swait.h>
- +#include <linux/suspend.h>
-
- void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
- struct lock_class_key *key)
- @@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_head *q)
- }
- EXPORT_SYMBOL(swake_up_locked);
-
- +void swake_up_all_locked(struct swait_queue_head *q)
- +{
- + struct swait_queue *curr;
- + int wakes = 0;
- +
- + while (!list_empty(&q->task_list)) {
- +
- + curr = list_first_entry(&q->task_list, typeof(*curr),
- + task_list);
- + wake_up_process(curr->task);
- + list_del_init(&curr->task_list);
- + wakes++;
- + }
- + if (pm_in_action)
- + return;
- + WARN(wakes > 2, "complete_all() with %d waiters\n", wakes);
- +}
- +EXPORT_SYMBOL(swake_up_all_locked);
- +
- void swake_up(struct swait_queue_head *q)
- {
- unsigned long flags;
- @@ -54,6 +74,7 @@ void swake_up_all(struct swait_queue_head *q)
- if (!swait_active(q))
- return;
-
- + WARN_ON(irqs_disabled());
- raw_spin_lock_irq(&q->lock);
- list_splice_init(&q->task_list, &tmp);
- while (!list_empty(&tmp)) {
- diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c
- new file mode 100644
- index 000000000000..1950f40ca725
- --- /dev/null
- +++ b/kernel/sched/swork.c
- @@ -0,0 +1,173 @@
- +/*
- + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
- + *
- + * Provides a PREEMPT_RT_FULL-safe framework for enqueueing callbacks
- + * from irq context. The callbacks are executed in kthread context.
- + */
- +
- +#include <linux/swait.h>
- +#include <linux/swork.h>
- +#include <linux/kthread.h>
- +#include <linux/slab.h>
- +#include <linux/spinlock.h>
- +#include <linux/export.h>
- +
- +#define SWORK_EVENT_PENDING (1 << 0)
- +
- +static DEFINE_MUTEX(worker_mutex);
- +static struct sworker *glob_worker;
- +
- +struct sworker {
- + struct list_head events;
- + struct swait_queue_head wq;
- +
- + raw_spinlock_t lock;
- +
- + struct task_struct *task;
- + int refs;
- +};
- +
- +static bool swork_readable(struct sworker *worker)
- +{
- + bool r;
- +
- + if (kthread_should_stop())
- + return true;
- +
- + raw_spin_lock_irq(&worker->lock);
- + r = !list_empty(&worker->events);
- + raw_spin_unlock_irq(&worker->lock);
- +
- + return r;
- +}
- +
- +static int swork_kthread(void *arg)
- +{
- + struct sworker *worker = arg;
- +
- + for (;;) {
- + swait_event_interruptible(worker->wq,
- + swork_readable(worker));
- + if (kthread_should_stop())
- + break;
- +
- + raw_spin_lock_irq(&worker->lock);
- + while (!list_empty(&worker->events)) {
- + struct swork_event *sev;
- +
- + sev = list_first_entry(&worker->events,
- + struct swork_event, item);
- + list_del(&sev->item);
- + raw_spin_unlock_irq(&worker->lock);
- +
- + WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
- + &sev->flags));
- + sev->func(sev);
- + raw_spin_lock_irq(&worker->lock);
- + }
- + raw_spin_unlock_irq(&worker->lock);
- + }
- + return 0;
- +}
- +
- +static struct sworker *swork_create(void)
- +{
- + struct sworker *worker;
- +
- + worker = kzalloc(sizeof(*worker), GFP_KERNEL);
- + if (!worker)
- + return ERR_PTR(-ENOMEM);
- +
- + INIT_LIST_HEAD(&worker->events);
- + raw_spin_lock_init(&worker->lock);
- + init_swait_queue_head(&worker->wq);
- +
- + worker->task = kthread_run(swork_kthread, worker, "kswork");
- + if (IS_ERR(worker->task)) {
- + kfree(worker);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + return worker;
- +}
- +
- +static void swork_destroy(struct sworker *worker)
- +{
- + kthread_stop(worker->task);
- +
- + WARN_ON(!list_empty(&worker->events));
- + kfree(worker);
- +}
- +
- +/**
- + * swork_queue - queue swork
- + *
- + * Returns %false if @sev was already on a queue, %true otherwise.
- + *
- + * The work is queued and processed on a random CPU.
- + */
- +bool swork_queue(struct swork_event *sev)
- +{
- + unsigned long flags;
- +
- + if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
- + return false;
- +
- + raw_spin_lock_irqsave(&glob_worker->lock, flags);
- + list_add_tail(&sev->item, &glob_worker->events);
- + raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
- +
- + swake_up(&glob_worker->wq);
- + return true;
- +}
- +EXPORT_SYMBOL_GPL(swork_queue);
- +
- +/**
- + * swork_get - get an instance of the sworker
- + *
- + * Returns a negative error code if the initialization of the worker
- + * failed, %0 otherwise.
- + *
- + */
- +int swork_get(void)
- +{
- + struct sworker *worker;
- +
- + mutex_lock(&worker_mutex);
- + if (!glob_worker) {
- + worker = swork_create();
- + if (IS_ERR(worker)) {
- + mutex_unlock(&worker_mutex);
- + return -ENOMEM;
- + }
- +
- + glob_worker = worker;
- + }
- +
- + glob_worker->refs++;
- + mutex_unlock(&worker_mutex);
- +
- + return 0;
- +}
- +EXPORT_SYMBOL_GPL(swork_get);
- +
- +/**
- + * swork_put - puts an instance of the sworker
- + *
- + * Will destroy the sworker thread. This function must not be called until all
- + * queued events have been completed.
- + */
- +void swork_put(void)
- +{
- + mutex_lock(&worker_mutex);
- +
- + glob_worker->refs--;
- + if (glob_worker->refs > 0)
- + goto out;
- +
- + swork_destroy(glob_worker);
- + glob_worker = NULL;
- +out:
- + mutex_unlock(&worker_mutex);
- +}
- +EXPORT_SYMBOL_GPL(swork_put);
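A usage sketch for the swork framework added above. struct swork_event and its initializer live in include/linux/swork.h (added elsewhere in this series, not shown), so the INIT_SWORK name is an assumption here:

    #include <linux/kernel.h>
    #include <linux/interrupt.h>
    #include <linux/swork.h>

    static void demo_swork_fn(struct swork_event *sev)
    {
            pr_info("running in kswork kthread context\n");  /* may sleep here */
    }

    static struct swork_event demo_event;

    static int demo_setup(void)
    {
            int err = swork_get();          /* creates/refs the "kswork" thread */

            if (err)
                    return err;
            INIT_SWORK(&demo_event, demo_swork_fn); /* assumed helper */
            return 0;
    }

    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
            swork_queue(&demo_event);       /* raw-spinlock safe from hard irq */
            return IRQ_HANDLED;
    }

    static void demo_teardown(void)
    {
            swork_put();                    /* destroys the thread on last ref */
    }

swork_get() must be called from sleepable context (it may allocate and spawn the kthread); only swork_queue() is safe from irq context.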
- diff --git a/kernel/signal.c b/kernel/signal.c
- index 7ebe236a5364..4d094ae3a625 100644
- --- a/kernel/signal.c
- +++ b/kernel/signal.c
- @@ -14,6 +14,7 @@
- #include <linux/export.h>
- #include <linux/init.h>
- #include <linux/sched.h>
- +#include <linux/sched/rt.h>
- #include <linux/fs.h>
- #include <linux/tty.h>
- #include <linux/binfmts.h>
- @@ -354,13 +355,30 @@ static bool task_participate_group_stop(struct task_struct *task)
- return false;
- }
-
- +static inline struct sigqueue *get_task_cache(struct task_struct *t)
- +{
- + struct sigqueue *q = t->sigqueue_cache;
- +
- + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
- + return NULL;
- + return q;
- +}
- +
- +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
- +{
- + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
- + return 0;
- + return 1;
- +}
- +
- /*
- * allocate a new signal queue record
- * - this may be called without locks if and only if t == current, otherwise an
- * appropriate lock must be held to stop the target task from exiting
- */
- static struct sigqueue *
- -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
- +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
- + int override_rlimit, int fromslab)
- {
- struct sigqueue *q = NULL;
- struct user_struct *user;
- @@ -377,7 +395,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
- - q = kmem_cache_alloc(sigqueue_cachep, flags);
- + if (!fromslab)
- + q = get_task_cache(t);
- + if (!q)
- + q = kmem_cache_alloc(sigqueue_cachep, flags);
- } else {
- print_dropped_signal(sig);
- }
- @@ -394,6 +415,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
- return q;
- }
-
- +static struct sigqueue *
- +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
- + int override_rlimit)
- +{
- + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
- +}
- +
- static void __sigqueue_free(struct sigqueue *q)
- {
- if (q->flags & SIGQUEUE_PREALLOC)
- @@ -403,6 +431,21 @@ static void __sigqueue_free(struct sigqueue *q)
- kmem_cache_free(sigqueue_cachep, q);
- }
-
- +static void sigqueue_free_current(struct sigqueue *q)
- +{
- + struct user_struct *up;
- +
- + if (q->flags & SIGQUEUE_PREALLOC)
- + return;
- +
- + up = q->user;
- + if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
- + atomic_dec(&up->sigpending);
- + free_uid(up);
- + } else
- + __sigqueue_free(q);
- +}
- +
- void flush_sigqueue(struct sigpending *queue)
- {
- struct sigqueue *q;
- @@ -415,6 +458,21 @@ void flush_sigqueue(struct sigpending *queue)
- }
- }
-
- +/*
- + * Called from __exit_signal. Flush tsk->pending and
- + * tsk->sigqueue_cache
- + */
- +void flush_task_sigqueue(struct task_struct *tsk)
- +{
- + struct sigqueue *q;
- +
- + flush_sigqueue(&tsk->pending);
- +
- + q = get_task_cache(tsk);
- + if (q)
- + kmem_cache_free(sigqueue_cachep, q);
- +}
- +
- /*
- * Flush all pending signals for this kthread.
- */
- @@ -534,7 +592,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
- (info->si_code == SI_TIMER) &&
- (info->si_sys_private);
-
- - __sigqueue_free(first);
- + sigqueue_free_current(first);
- } else {
- /*
- * Ok, it wasn't in the queue. This must be
- @@ -570,6 +628,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
- bool resched_timer = false;
- int signr;
-
- + WARN_ON_ONCE(tsk != current);
- +
- /* We only dequeue private signals from ourselves, we don't let
- * signalfd steal them
- */
- @@ -1166,8 +1226,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
- * We don't want to have recursive SIGSEGV's etc, for example,
- * that is why we also clear SIGNAL_UNKILLABLE.
- */
- -int
- -force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- +static int
- +do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- {
- unsigned long int flags;
- int ret, blocked, ignored;
- @@ -1192,6 +1252,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- return ret;
- }
-
- +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- +{
- +/*
- + * On some archs, PREEMPT_RT has to delay sending a signal from a trap
- + * since it cannot enable preemption, and the signal code's spin_locks
- + * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
- + * send the signal on exit of the trap.
- + */
- +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
- + if (in_atomic()) {
- + if (WARN_ON_ONCE(t != current))
- + return 0;
- + if (WARN_ON_ONCE(t->forced_info.si_signo))
- + return 0;
- +
- + if (is_si_special(info)) {
- + WARN_ON_ONCE(info != SEND_SIG_PRIV);
- + t->forced_info.si_signo = sig;
- + t->forced_info.si_errno = 0;
- + t->forced_info.si_code = SI_KERNEL;
- + t->forced_info.si_pid = 0;
- + t->forced_info.si_uid = 0;
- + } else {
- + t->forced_info = *info;
- + }
- +
- + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
- + return 0;
- + }
- +#endif
- + return do_force_sig_info(sig, info, t);
- +}
- +
- /*
- * Nuke all other threads in the group.
- */
- @@ -1226,12 +1319,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
- * Disable interrupts early to avoid deadlocks.
- * See rcu_read_unlock() comment header for details.
- */
- - local_irq_save(*flags);
- + local_irq_save_nort(*flags);
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL)) {
- rcu_read_unlock();
- - local_irq_restore(*flags);
- + local_irq_restore_nort(*flags);
- break;
- }
- /*
- @@ -1252,7 +1345,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
- }
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
- - local_irq_restore(*flags);
- + local_irq_restore_nort(*flags);
- }
-
- return sighand;
- @@ -1495,7 +1588,8 @@ EXPORT_SYMBOL(kill_pid);
- */
- struct sigqueue *sigqueue_alloc(void)
- {
- - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
- + /* Preallocated sigqueue objects always come from the slabcache! */
- + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
-
- if (q)
- q->flags |= SIGQUEUE_PREALLOC;
- @@ -1856,15 +1950,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
- if (gstop_done && ptrace_reparented(current))
- do_notify_parent_cldstop(current, false, why);
-
- - /*
- - * Don't want to allow preemption here, because
- - * sys_ptrace() needs this task to be inactive.
- - *
- - * XXX: implement read_unlock_no_resched().
- - */
- - preempt_disable();
- read_unlock(&tasklist_lock);
- - preempt_enable_no_resched();
- freezable_schedule();
- } else {
- /*
- diff --git a/kernel/softirq.c b/kernel/softirq.c
- index 744fa611cae0..819bd7cf5ad0 100644
- --- a/kernel/softirq.c
- +++ b/kernel/softirq.c
- @@ -21,10 +21,12 @@
- #include <linux/freezer.h>
- #include <linux/kthread.h>
- #include <linux/rcupdate.h>
- +#include <linux/delay.h>
- #include <linux/ftrace.h>
- #include <linux/smp.h>
- #include <linux/smpboot.h>
- #include <linux/tick.h>
- +#include <linux/locallock.h>
- #include <linux/irq.h>
-
- #define CREATE_TRACE_POINTS
- @@ -56,12 +58,108 @@ EXPORT_SYMBOL(irq_stat);
- static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
-
- DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
- +DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
- +#endif
-
- const char * const softirq_to_name[NR_SOFTIRQS] = {
- "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
- "TASKLET", "SCHED", "HRTIMER", "RCU"
- };
-
- +#ifdef CONFIG_NO_HZ_COMMON
- +# ifdef CONFIG_PREEMPT_RT_FULL
- +
- +struct softirq_runner {
- + struct task_struct *runner[NR_SOFTIRQS];
- +};
- +
- +static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
- +
- +static inline void softirq_set_runner(unsigned int sirq)
- +{
- + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
- +
- + sr->runner[sirq] = current;
- +}
- +
- +static inline void softirq_clr_runner(unsigned int sirq)
- +{
- + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
- +
- + sr->runner[sirq] = NULL;
- +}
- +
- +/*
- + * On preempt-rt a softirq running context might be blocked on a
- + * lock. There might be no other runnable task on this CPU because the
- + * lock owner runs on some other CPU. So we have to go into idle with
- + * the pending bit set. Therefore we need to check this, otherwise we
- + * warn about false positives, which confuses users and defeats the
- + * whole purpose of this test.
- + *
- + * This code is called with interrupts disabled.
- + */
- +void softirq_check_pending_idle(void)
- +{
- + static int rate_limit;
- + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
- + u32 warnpending;
- + int i;
- +
- + if (rate_limit >= 10)
- + return;
- +
- + warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
- + for (i = 0; i < NR_SOFTIRQS; i++) {
- + struct task_struct *tsk = sr->runner[i];
- +
- + /*
- + * The wakeup code in rtmutex.c wakes up the task
- + * _before_ it sets pi_blocked_on to NULL under
- + * tsk->pi_lock. So we need to check for both: state
- + * and pi_blocked_on.
- + */
- + if (tsk) {
- + raw_spin_lock(&tsk->pi_lock);
- + if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
- + /* Clear all bits pending in that task */
- + warnpending &= ~(tsk->softirqs_raised);
- + warnpending &= ~(1 << i);
- + }
- + raw_spin_unlock(&tsk->pi_lock);
- + }
- + }
- +
- + if (warnpending) {
- + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- + warnpending);
- + rate_limit++;
- + }
- +}
- +# else
- +/*
- + * On !PREEMPT_RT we just printk, rate limited:
- + */
- +void softirq_check_pending_idle(void)
- +{
- + static int rate_limit;
- +
- + if (rate_limit < 10 &&
- + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
- + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- + local_softirq_pending());
- + rate_limit++;
- + }
- +}
- +# endif
- +
- +#else /* !CONFIG_NO_HZ_COMMON */
- +static inline void softirq_set_runner(unsigned int sirq) { }
- +static inline void softirq_clr_runner(unsigned int sirq) { }
- +#endif
- +
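- 
- Both variants share the same rate limiting: a static counter caps the warning at ten occurrences for the lifetime of the system. A minimal sketch of that counter in plain C:
- 
-     #include <stdio.h>
- 
-     /* Warn at most 10 times, ever - mirrors the rate_limit static above. */
-     static void check_pending_idle(unsigned int pending)
-     {
-         static int rate_limit;
- 
-         if (rate_limit < 10 && pending) {
-             fprintf(stderr, "NOHZ: local_softirq_pending %02x\n", pending);
-             rate_limit++;
-         }
-     }
- 
-     int main(void)
-     {
-         for (int i = 0; i < 20; i++)
-             check_pending_idle(0x08);   /* prints only 10 lines */
-         return 0;
-     }
- 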
- /*
- * we cannot loop indefinitely here to avoid userspace starvation,
- * but we also don't want to introduce a worst case 1/HZ latency
- @@ -77,6 +175,38 @@ static void wakeup_softirqd(void)
- wake_up_process(tsk);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static void wakeup_timer_softirqd(void)
- +{
- + /* Interrupts are disabled: no need to stop preemption */
- + struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
- +
- + if (tsk && tsk->state != TASK_RUNNING)
- + wake_up_process(tsk);
- +}
- +#endif
- +
- +static void handle_softirq(unsigned int vec_nr)
- +{
- + struct softirq_action *h = softirq_vec + vec_nr;
- + int prev_count;
- +
- + prev_count = preempt_count();
- +
- + kstat_incr_softirqs_this_cpu(vec_nr);
- +
- + trace_softirq_entry(vec_nr);
- + h->action(h);
- + trace_softirq_exit(vec_nr);
- + if (unlikely(prev_count != preempt_count())) {
- + pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
- + vec_nr, softirq_to_name[vec_nr], h->action,
- + prev_count, preempt_count());
- + preempt_count_set(prev_count);
- + }
- +}
- +
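- 
- handle_softirq() keeps a long-standing defensive pattern: snapshot preempt_count() before the handler runs, and if the handler leaked a count, complain and repair rather than crash. The same shape in standalone C (a toy counter, not the real preempt machinery):
- 
-     #include <stdio.h>
- 
-     static int preempt_count;  /* stand-in for the kernel's preempt_count() */
- 
-     static void buggy_handler(void) { preempt_count++; /* leaks a count */ }
- 
-     /* Snapshot, run, verify, repair - like the check in handle_softirq(). */
-     static void run_handler(void (*action)(void), const char *name)
-     {
-         int prev = preempt_count;
- 
-         action();
-         if (prev != preempt_count) {
-             fprintf(stderr, "huh, %s entered with %08x, exited with %08x?\n",
-                     name, prev, preempt_count);
-             preempt_count = prev;  /* restore so the system keeps running */
-         }
-     }
- 
-     int main(void)
-     {
-         run_handler(buggy_handler, "buggy");
-         printf("preempt_count repaired to %d\n", preempt_count);
-         return 0;
-     }
- 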
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
- @@ -88,6 +218,47 @@ static bool ksoftirqd_running(void)
- return tsk && (tsk->state == TASK_RUNNING);
- }
-
- +static inline int ksoftirqd_softirq_pending(void)
- +{
- + return local_softirq_pending();
- +}
- +
- +static void handle_pending_softirqs(u32 pending)
- +{
- + struct softirq_action *h = softirq_vec;
- + int softirq_bit;
- +
- + local_irq_enable();
- +
- + h = softirq_vec;
- +
- + while ((softirq_bit = ffs(pending))) {
- + unsigned int vec_nr;
- +
- + h += softirq_bit - 1;
- + vec_nr = h - softirq_vec;
- + handle_softirq(vec_nr);
- +
- + h++;
- + pending >>= softirq_bit;
- + }
- +
- + rcu_bh_qs();
- + local_irq_disable();
- +}
- +
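- 
- The ffs() walk in handle_pending_softirqs() is worth spelling out: ffs() returns the 1-based index of the lowest set bit, so h += softirq_bit - 1 lands on the right vector and pending >>= softirq_bit discards everything up to and including it. A standalone demonstration of the same walk:
- 
-     #include <stdio.h>
-     #include <strings.h>    /* ffs() */
- 
-     int main(void)
-     {
-         unsigned int pending = 0x113;   /* bits 0, 1, 4 and 8 set */
-         int base = 0, bit;
- 
-         while ((bit = ffs(pending))) {
-             int vec_nr = base + bit - 1;
- 
-             printf("handling softirq vector %d\n", vec_nr);
-             base += bit;        /* plays the role of advancing h */
-             pending >>= bit;    /* drop the handled bit and below */
-         }
-         return 0;   /* prints vectors 0, 1, 4, 8 */
-     }
- 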
- +static void run_ksoftirqd(unsigned int cpu)
- +{
- + local_irq_disable();
- + if (ksoftirqd_softirq_pending()) {
- + __do_softirq();
- + local_irq_enable();
- + cond_resched_rcu_qs();
- + return;
- + }
- + local_irq_enable();
- +}
- +
- /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
- @@ -243,10 +414,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
- unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
- unsigned long old_flags = current->flags;
- int max_restart = MAX_SOFTIRQ_RESTART;
- - struct softirq_action *h;
- bool in_hardirq;
- __u32 pending;
- - int softirq_bit;
-
- /*
- * Mask out PF_MEMALLOC as current task context is borrowed for the
- @@ -265,36 +434,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
- /* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
-
- - local_irq_enable();
- -
- - h = softirq_vec;
- -
- - while ((softirq_bit = ffs(pending))) {
- - unsigned int vec_nr;
- - int prev_count;
- -
- - h += softirq_bit - 1;
- -
- - vec_nr = h - softirq_vec;
- - prev_count = preempt_count();
- -
- - kstat_incr_softirqs_this_cpu(vec_nr);
- -
- - trace_softirq_entry(vec_nr);
- - h->action(h);
- - trace_softirq_exit(vec_nr);
- - if (unlikely(prev_count != preempt_count())) {
- - pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
- - vec_nr, softirq_to_name[vec_nr], h->action,
- - prev_count, preempt_count());
- - preempt_count_set(prev_count);
- - }
- - h++;
- - pending >>= softirq_bit;
- - }
- -
- - rcu_bh_qs();
- - local_irq_disable();
- + handle_pending_softirqs(pending);
-
- pending = local_softirq_pending();
- if (pending) {
- @@ -330,6 +470,309 @@ asmlinkage __visible void do_softirq(void)
- local_irq_restore(flags);
- }
-
- +/*
- + * This function must run with irqs disabled!
- + */
- +void raise_softirq_irqoff(unsigned int nr)
- +{
- + __raise_softirq_irqoff(nr);
- +
- + /*
- + * If we're in an interrupt or softirq, we're done
- + * (this also catches softirq-disabled code). We will
- + * actually run the softirq once we return from
- + * the irq or softirq.
- + *
- + * Otherwise we wake up ksoftirqd to make sure we
- + * schedule the softirq soon.
- + */
- + if (!in_interrupt())
- + wakeup_softirqd();
- +}
- +
- +void __raise_softirq_irqoff(unsigned int nr)
- +{
- + trace_softirq_raise(nr);
- + or_softirq_pending(1UL << nr);
- +}
- +
- +static inline void local_bh_disable_nort(void) { local_bh_disable(); }
- +static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
- +static void ksoftirqd_set_sched_params(unsigned int cpu) { }
- +
- +#else /* PREEMPT_RT_FULL */
- +
- +/*
- + * On RT we serialize softirq execution with a cpu local lock per softirq
- + */
- +static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
- +
- +void __init softirq_early_init(void)
- +{
- + int i;
- +
- + for (i = 0; i < NR_SOFTIRQS; i++)
- + local_irq_lock_init(local_softirq_locks[i]);
- +}
- +
- +static void lock_softirq(int which)
- +{
- + local_lock(local_softirq_locks[which]);
- +}
- +
- +static void unlock_softirq(int which)
- +{
- + local_unlock(local_softirq_locks[which]);
- +}
- +
- +static void do_single_softirq(int which)
- +{
- + unsigned long old_flags = current->flags;
- +
- + current->flags &= ~PF_MEMALLOC;
- + vtime_account_irq_enter(current);
- + current->flags |= PF_IN_SOFTIRQ;
- + lockdep_softirq_enter();
- + local_irq_enable();
- + handle_softirq(which);
- + local_irq_disable();
- + lockdep_softirq_exit();
- + current->flags &= ~PF_IN_SOFTIRQ;
- + vtime_account_irq_enter(current);
- + tsk_restore_flags(current, old_flags, PF_MEMALLOC);
- +}
- +
- +/*
- + * Called with interrupts disabled. Process softirqs which were raised
- + * in current context (or on behalf of ksoftirqd).
- + */
- +static void do_current_softirqs(void)
- +{
- + while (current->softirqs_raised) {
- + int i = __ffs(current->softirqs_raised);
- + unsigned int pending, mask = (1U << i);
- +
- + current->softirqs_raised &= ~mask;
- + local_irq_enable();
- +
- + /*
- + * If the lock is contended, we boost the owner to
- + * process the softirq or leave the critical section
- + * now.
- + */
- + lock_softirq(i);
- + local_irq_disable();
- + softirq_set_runner(i);
- + /*
- + * Check with the local_softirq_pending() bits,
- + * whether we need to process this still or if someone
- + * else took care of it.
- + */
- + pending = local_softirq_pending();
- + if (pending & mask) {
- + set_softirq_pending(pending & ~mask);
- + do_single_softirq(i);
- + }
- + softirq_clr_runner(i);
- + WARN_ON(current->softirq_nestcnt != 1);
- + local_irq_enable();
- + unlock_softirq(i);
- + local_irq_disable();
- + }
- +}
- +
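- 
- Note the recheck in do_current_softirqs(): between raising a softirq and acquiring the per-softirq local lock, another context may already have processed it, so the pending mask is tested again under the lock before the handler runs. A toy version of that test-again-under-lock step:
- 
-     #include <stdio.h>
- 
-     static unsigned int pending_bits = 0x06;    /* raised softirqs */
- 
-     static void process(int i)
-     {
-         unsigned int mask = 1U << i;
- 
-         /* ... lock_softirq(i) would be taken here ... */
-         if (pending_bits & mask) {
-             pending_bits &= ~mask;
-             printf("running softirq %d\n", i);
-         } else {
-             printf("softirq %d already handled elsewhere\n", i);
-         }
-         /* ... unlock_softirq(i) ... */
-     }
- 
-     int main(void)
-     {
-         process(1);
-         process(1);     /* second call finds the bit already clear */
-         return 0;
-     }
- 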
- +void __local_bh_disable(void)
- +{
- + if (++current->softirq_nestcnt == 1)
- + migrate_disable();
- +}
- +EXPORT_SYMBOL(__local_bh_disable);
- +
- +void __local_bh_enable(void)
- +{
- + if (WARN_ON(current->softirq_nestcnt == 0))
- + return;
- +
- + local_irq_disable();
- + if (current->softirq_nestcnt == 1 && current->softirqs_raised)
- + do_current_softirqs();
- + local_irq_enable();
- +
- + if (--current->softirq_nestcnt == 0)
- + migrate_enable();
- +}
- +EXPORT_SYMBOL(__local_bh_enable);
- +
- +void _local_bh_enable(void)
- +{
- + if (WARN_ON(current->softirq_nestcnt == 0))
- + return;
- + if (--current->softirq_nestcnt == 0)
- + migrate_enable();
- +}
- +EXPORT_SYMBOL(_local_bh_enable);
- +
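- 
- The nesting counter is what keeps the RT version cheap: only the 0 -> 1 and 1 -> 0 transitions pin and unpin the task; nested disable/enable pairs just count. A compact userspace model, with migrate_disable()/migrate_enable() stubbed out:
- 
-     #include <stdio.h>
- 
-     static int softirq_nestcnt;
- 
-     static void migrate_disable(void) { puts("pinned to this CPU"); }
-     static void migrate_enable(void)  { puts("unpinned"); }
- 
-     static void bh_disable(void)
-     {
-         if (++softirq_nestcnt == 1)     /* outermost only */
-             migrate_disable();
-     }
- 
-     static void bh_enable(void)
-     {
-         if (softirq_nestcnt == 0) {
-             fprintf(stderr, "unbalanced bh_enable\n");
-             return;
-         }
-         if (--softirq_nestcnt == 0)     /* outermost only */
-             migrate_enable();
-     }
- 
-     int main(void)
-     {
-         bh_disable();
-         bh_disable();   /* nested: no extra pinning */
-         bh_enable();
-         bh_enable();    /* outermost: unpins */
-         return 0;
-     }
- 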
- +int in_serving_softirq(void)
- +{
- + return current->flags & PF_IN_SOFTIRQ;
- +}
- +EXPORT_SYMBOL(in_serving_softirq);
- +
- +/* Called with preemption disabled */
- +static void run_ksoftirqd(unsigned int cpu)
- +{
- + local_irq_disable();
- + current->softirq_nestcnt++;
- +
- + do_current_softirqs();
- + current->softirq_nestcnt--;
- + local_irq_enable();
- + cond_resched_rcu_qs();
- +}
- +
- +/*
- + * Called from netif_rx_ni(). Preemption enabled, but migration
- + * disabled. So the cpu can't go away under us.
- + */
- +void thread_do_softirq(void)
- +{
- + if (!in_serving_softirq() && current->softirqs_raised) {
- + current->softirq_nestcnt++;
- + do_current_softirqs();
- + current->softirq_nestcnt--;
- + }
- +}
- +
- +static void do_raise_softirq_irqoff(unsigned int nr)
- +{
- + unsigned int mask;
- +
- + mask = 1UL << nr;
- +
- + trace_softirq_raise(nr);
- + or_softirq_pending(mask);
- +
- + /*
- + * If we are not in a hard interrupt and inside a bh disabled
- + * region, we simply raise the flag on current. local_bh_enable()
- + * will make sure that the softirq is executed. Otherwise we
- + * delegate it to ksoftirqd.
- + */
- + if (!in_irq() && current->softirq_nestcnt)
- + current->softirqs_raised |= mask;
- + else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
- + return;
- +
- + if (mask & TIMER_SOFTIRQS)
- + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
- + else
- + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
- +}
- +
- +static void wakeup_proper_softirq(unsigned int nr)
- +{
- + if ((1UL << nr) & TIMER_SOFTIRQS)
- + wakeup_timer_softirqd();
- + else
- + wakeup_softirqd();
- +}
- +
- +void __raise_softirq_irqoff(unsigned int nr)
- +{
- + do_raise_softirq_irqoff(nr);
- + if (!in_irq() && !current->softirq_nestcnt)
- + wakeup_proper_softirq(nr);
- +}
- +
- +/*
- + * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
- + */
- +void __raise_softirq_irqoff_ksoft(unsigned int nr)
- +{
- + unsigned int mask;
- +
- + if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
- + !__this_cpu_read(ktimer_softirqd)))
- + return;
- + mask = 1UL << nr;
- +
- + trace_softirq_raise(nr);
- + or_softirq_pending(mask);
- + if (mask & TIMER_SOFTIRQS)
- + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
- + else
- + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
- + wakeup_proper_softirq(nr);
- +}
- +
- +/*
- + * This function must run with irqs disabled!
- + */
- +void raise_softirq_irqoff(unsigned int nr)
- +{
- + do_raise_softirq_irqoff(nr);
- +
- + /*
- + * If we're in a hard interrupt we let the irq return code deal
- + * with the wakeup of ksoftirqd.
- + */
- + if (in_irq())
- + return;
- + /*
- + * If we are in thread context but outside of a bh disabled
- + * region, we need to wake ksoftirqd as well.
- + *
- + * CHECKME: Some of the places which do that could be wrapped
- + * into local_bh_disable/enable pairs. Though it's unclear
- + * whether this is worth the effort. To find those places just
- + * raise a WARN() if the condition is met.
- + */
- + if (!current->softirq_nestcnt)
- + wakeup_proper_softirq(nr);
- +}
- +
- +static inline int ksoftirqd_softirq_pending(void)
- +{
- + return current->softirqs_raised;
- +}
- +
- +static inline void local_bh_disable_nort(void) { }
- +static inline void _local_bh_enable_nort(void) { }
- +
- +static inline void ksoftirqd_set_sched_params(unsigned int cpu)
- +{
- + /* Take over all but timer pending softirqs when starting */
- + local_irq_disable();
- + current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
- + local_irq_enable();
- +}
- +
- +static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
- +{
- + struct sched_param param = { .sched_priority = 1 };
- +
- + sched_setscheduler(current, SCHED_FIFO, &param);
- +
- + /* Take over timer pending softirqs when starting */
- + local_irq_disable();
- + current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
- + local_irq_enable();
- +}
- +
- +static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
- + bool online)
- +{
- + struct sched_param param = { .sched_priority = 0 };
- +
- + sched_setscheduler(current, SCHED_NORMAL, &param);
- +}
- +
- +static int ktimer_softirqd_should_run(unsigned int cpu)
- +{
- + return current->softirqs_raised;
- +}
- +
- +#endif /* PREEMPT_RT_FULL */
- /*
- * Enter an interrupt context.
- */
- @@ -341,9 +784,9 @@ void irq_enter(void)
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
- - local_bh_disable();
- + local_bh_disable_nort();
- tick_irq_enter();
- - _local_bh_enable();
- + _local_bh_enable_nort();
- }
-
- __irq_enter();
- @@ -351,6 +794,7 @@ void irq_enter(void)
-
- static inline void invoke_softirq(void)
- {
- +#ifndef CONFIG_PREEMPT_RT_FULL
- if (ksoftirqd_running())
- return;
-
- @@ -373,6 +817,18 @@ static inline void invoke_softirq(void)
- } else {
- wakeup_softirqd();
- }
- +#else /* PREEMPT_RT_FULL */
- + unsigned long flags;
- +
- + local_irq_save(flags);
- + if (__this_cpu_read(ksoftirqd) &&
- + __this_cpu_read(ksoftirqd)->softirqs_raised)
- + wakeup_softirqd();
- + if (__this_cpu_read(ktimer_softirqd) &&
- + __this_cpu_read(ktimer_softirqd)->softirqs_raised)
- + wakeup_timer_softirqd();
- + local_irq_restore(flags);
- +#endif
- }
-
- static inline void tick_irq_exit(void)
- @@ -409,26 +865,6 @@ void irq_exit(void)
- trace_hardirq_exit(); /* must be last! */
- }
-
- -/*
- - * This function must run with irqs disabled!
- - */
- -inline void raise_softirq_irqoff(unsigned int nr)
- -{
- - __raise_softirq_irqoff(nr);
- -
- - /*
- - * If we're in an interrupt or softirq, we're done
- - * (this also catches softirq-disabled code). We will
- - * actually run the softirq once we return from
- - * the irq or softirq.
- - *
- - * Otherwise we wake up ksoftirqd to make sure we
- - * schedule the softirq soon.
- - */
- - if (!in_interrupt())
- - wakeup_softirqd();
- -}
- -
- void raise_softirq(unsigned int nr)
- {
- unsigned long flags;
- @@ -438,12 +874,6 @@ void raise_softirq(unsigned int nr)
- local_irq_restore(flags);
- }
-
- -void __raise_softirq_irqoff(unsigned int nr)
- -{
- - trace_softirq_raise(nr);
- - or_softirq_pending(1UL << nr);
- -}
- -
- void open_softirq(int nr, void (*action)(struct softirq_action *))
- {
- softirq_vec[nr].action = action;
- @@ -460,15 +890,45 @@ struct tasklet_head {
- static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
- static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
-
- +static inline void
- +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
- +{
- + if (tasklet_trylock(t)) {
- +again:
- + /* We may have been preempted before tasklet_trylock
- + * and __tasklet_action may have already run.
- + * So double check the sched bit while the tasklet
- + * is locked before adding it to the list.
- + */
- + if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
- + t->next = NULL;
- + *head->tail = t;
- + head->tail = &(t->next);
- + raise_softirq_irqoff(nr);
- + tasklet_unlock(t);
- + } else {
- + /* This is subtle. If we hit the corner case above,
- + * it is possible that we get preempted right here,
- + * and another task has successfully called
- + * tasklet_schedule(), then this function, and
- + * failed on the trylock. Thus we must be sure
- + * before releasing the tasklet lock that the
- + * SCHED bit is clear. Otherwise the tasklet
- + * may get its SCHED bit set but never be added
- + * to the list.
- + */
- + if (!tasklet_tryunlock(t))
- + goto again;
- + }
- + }
- +}
- +
- void __tasklet_schedule(struct tasklet_struct *t)
- {
- unsigned long flags;
-
- local_irq_save(flags);
- - t->next = NULL;
- - *__this_cpu_read(tasklet_vec.tail) = t;
- - __this_cpu_write(tasklet_vec.tail, &(t->next));
- - raise_softirq_irqoff(TASKLET_SOFTIRQ);
- + __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(__tasklet_schedule);
- @@ -478,10 +938,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
- unsigned long flags;
-
- local_irq_save(flags);
- - t->next = NULL;
- - *__this_cpu_read(tasklet_hi_vec.tail) = t;
- - __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
- - raise_softirq_irqoff(HI_SOFTIRQ);
- + __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule);
- @@ -490,82 +947,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
- {
- BUG_ON(!irqs_disabled());
-
- - t->next = __this_cpu_read(tasklet_hi_vec.head);
- - __this_cpu_write(tasklet_hi_vec.head, t);
- - __raise_softirq_irqoff(HI_SOFTIRQ);
- + __tasklet_hi_schedule(t);
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
- -static __latent_entropy void tasklet_action(struct softirq_action *a)
- +void tasklet_enable(struct tasklet_struct *t)
- {
- - struct tasklet_struct *list;
- + if (!atomic_dec_and_test(&t->count))
- + return;
- + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
- + tasklet_schedule(t);
- +}
- +EXPORT_SYMBOL(tasklet_enable);
-
- - local_irq_disable();
- - list = __this_cpu_read(tasklet_vec.head);
- - __this_cpu_write(tasklet_vec.head, NULL);
- - __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
- - local_irq_enable();
- +static void __tasklet_action(struct softirq_action *a,
- + struct tasklet_struct *list)
- +{
- + int loops = 1000000;
-
- while (list) {
- struct tasklet_struct *t = list;
-
- list = list->next;
-
- - if (tasklet_trylock(t)) {
- - if (!atomic_read(&t->count)) {
- - if (!test_and_clear_bit(TASKLET_STATE_SCHED,
- - &t->state))
- - BUG();
- - t->func(t->data);
- - tasklet_unlock(t);
- - continue;
- - }
- - tasklet_unlock(t);
- + /*
- + * Should always succeed - after a tasklet got on the
- + * list (after getting the SCHED bit set from 0 to 1),
- + * nothing but the tasklet softirq it got queued to can
- + * lock it:
- + */
- + if (!tasklet_trylock(t)) {
- + WARN_ON(1);
- + continue;
- }
-
- - local_irq_disable();
- t->next = NULL;
- - *__this_cpu_read(tasklet_vec.tail) = t;
- - __this_cpu_write(tasklet_vec.tail, &(t->next));
- - __raise_softirq_irqoff(TASKLET_SOFTIRQ);
- - local_irq_enable();
- +
- + /*
- + * If we cannot handle the tasklet because it's disabled,
- + * mark it as pending. tasklet_enable() will later
- + * re-schedule the tasklet.
- + */
- + if (unlikely(atomic_read(&t->count))) {
- +out_disabled:
- + /* implicit unlock: */
- + wmb();
- + t->state = TASKLET_STATEF_PENDING;
- + continue;
- + }
- +
- + /*
- + * From this point on the tasklet might be rescheduled
- + * on another CPU, but it can only be added to another
- + * CPU's tasklet list if we unlock the tasklet (which we
- + * don't do yet).
- + */
- + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- + WARN_ON(1);
- +
- +again:
- + t->func(t->data);
- +
- + /*
- + * Try to unlock the tasklet. We must use cmpxchg, because
- + * another CPU might have scheduled or disabled the tasklet.
- + * We only allow the STATE_RUN -> 0 transition here.
- + */
- + while (!tasklet_tryunlock(t)) {
- + /*
- + * If it got disabled meanwhile, bail out:
- + */
- + if (atomic_read(&t->count))
- + goto out_disabled;
- + /*
- + * If it got scheduled meanwhile, re-execute
- + * the tasklet function:
- + */
- + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- + goto again;
- + if (!--loops) {
- + printk("hm, tasklet state: %08lx\n", t->state);
- + WARN_ON(1);
- + tasklet_unlock(t);
- + break;
- + }
- + }
- }
- }
-
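- 
- The locking in __tasklet_common_schedule() and __tasklet_action() hinges on tasklet_tryunlock() being a cmpxchg that permits only the RUN -> 0 transition, so a concurrent re-schedule or disable can never be lost. A self-contained model of that state machine with C11 atomics (simplified: the disabled-count path is left out):
- 
-     #include <stdatomic.h>
-     #include <stdio.h>
- 
-     #define ST_SCHED (1UL << 0)
-     #define ST_RUN   (1UL << 1)
- 
-     static _Atomic unsigned long state;
- 
-     static int trylock(void)
-     {
-         /* set RUN, succeed only if it was clear before */
-         return !(atomic_fetch_or(&state, ST_RUN) & ST_RUN);
-     }
- 
-     /* Only the RUN -> 0 transition is allowed; fails if SCHED
-      * was set again while the body ran. */
-     static int tryunlock(void)
-     {
-         unsigned long expected = ST_RUN;
- 
-         return atomic_compare_exchange_strong(&state, &expected, 0);
-     }
- 
-     int main(void)
-     {
-         atomic_store(&state, ST_SCHED);
-         if (trylock()) {
-             atomic_fetch_and(&state, ~ST_SCHED);
-             printf("running tasklet body\n");
-             /* someone re-schedules it while we run: */
-             atomic_fetch_or(&state, ST_SCHED);
-             while (!tryunlock()) {
-                 if (atomic_fetch_and(&state, ~ST_SCHED) & ST_SCHED)
-                     printf("re-running tasklet body\n");
-             }
-         }
-         printf("final state %lx\n", atomic_load(&state));
-         return 0;
-     }
- 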
- +static void tasklet_action(struct softirq_action *a)
- +{
- + struct tasklet_struct *list;
- +
- + local_irq_disable();
- +
- + list = __this_cpu_read(tasklet_vec.head);
- + __this_cpu_write(tasklet_vec.head, NULL);
- + __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
- +
- + local_irq_enable();
- +
- + __tasklet_action(a, list);
- +}
- +
- static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
- {
- struct tasklet_struct *list;
-
- local_irq_disable();
- +
- list = __this_cpu_read(tasklet_hi_vec.head);
- __this_cpu_write(tasklet_hi_vec.head, NULL);
- __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
- - local_irq_enable();
- -
- - while (list) {
- - struct tasklet_struct *t = list;
-
- - list = list->next;
- -
- - if (tasklet_trylock(t)) {
- - if (!atomic_read(&t->count)) {
- - if (!test_and_clear_bit(TASKLET_STATE_SCHED,
- - &t->state))
- - BUG();
- - t->func(t->data);
- - tasklet_unlock(t);
- - continue;
- - }
- - tasklet_unlock(t);
- - }
- + local_irq_enable();
-
- - local_irq_disable();
- - t->next = NULL;
- - *__this_cpu_read(tasklet_hi_vec.tail) = t;
- - __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
- - __raise_softirq_irqoff(HI_SOFTIRQ);
- - local_irq_enable();
- - }
- + __tasklet_action(a, list);
- }
-
- void tasklet_init(struct tasklet_struct *t,
- @@ -586,7 +1083,7 @@ void tasklet_kill(struct tasklet_struct *t)
-
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- do {
- - yield();
- + msleep(1);
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
- }
- tasklet_unlock_wait(t);
- @@ -660,25 +1157,26 @@ void __init softirq_init(void)
- open_softirq(HI_SOFTIRQ, tasklet_hi_action);
- }
-
- -static int ksoftirqd_should_run(unsigned int cpu)
- +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- +void tasklet_unlock_wait(struct tasklet_struct *t)
- {
- - return local_softirq_pending();
- -}
- -
- -static void run_ksoftirqd(unsigned int cpu)
- -{
- - local_irq_disable();
- - if (local_softirq_pending()) {
- + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
- /*
- - * We can safely run softirq on inline stack, as we are not deep
- - * in the task stack here.
- + * Hack for now to avoid this busy-loop:
- */
- - __do_softirq();
- - local_irq_enable();
- - cond_resched_rcu_qs();
- - return;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + msleep(1);
- +#else
- + barrier();
- +#endif
- }
- - local_irq_enable();
- +}
- +EXPORT_SYMBOL(tasklet_unlock_wait);
- +#endif
- +
- +static int ksoftirqd_should_run(unsigned int cpu)
- +{
- + return ksoftirqd_softirq_pending();
- }
-
- #ifdef CONFIG_HOTPLUG_CPU
- @@ -745,17 +1243,31 @@ static int takeover_tasklets(unsigned int cpu)
-
- static struct smp_hotplug_thread softirq_threads = {
- .store = &ksoftirqd,
- + .setup = ksoftirqd_set_sched_params,
- .thread_should_run = ksoftirqd_should_run,
- .thread_fn = run_ksoftirqd,
- .thread_comm = "ksoftirqd/%u",
- };
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static struct smp_hotplug_thread softirq_timer_threads = {
- + .store = &ktimer_softirqd,
- + .setup = ktimer_softirqd_set_sched_params,
- + .cleanup = ktimer_softirqd_clr_sched_params,
- + .thread_should_run = ktimer_softirqd_should_run,
- + .thread_fn = run_ksoftirqd,
- + .thread_comm = "ktimersoftd/%u",
- +};
- +#endif
- +
- static __init int spawn_ksoftirqd(void)
- {
- cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
- takeover_tasklets);
- BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
- -
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
- +#endif
- return 0;
- }
- early_initcall(spawn_ksoftirqd);
- diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
- index ec9ab2f01489..8b89dbedeaff 100644
- --- a/kernel/stop_machine.c
- +++ b/kernel/stop_machine.c
- @@ -36,7 +36,7 @@ struct cpu_stop_done {
- struct cpu_stopper {
- struct task_struct *thread;
-
- - spinlock_t lock;
- + raw_spinlock_t lock;
- bool enabled; /* is this stopper enabled? */
- struct list_head works; /* list of pending works */
-
- @@ -78,14 +78,14 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
- unsigned long flags;
- bool enabled;
-
- - spin_lock_irqsave(&stopper->lock, flags);
- + raw_spin_lock_irqsave(&stopper->lock, flags);
- enabled = stopper->enabled;
- if (enabled)
- __cpu_stop_queue_work(stopper, work);
- else if (work->done)
- cpu_stop_signal_done(work->done);
- - spin_unlock_irqrestore(&stopper->lock, flags);
-
- + raw_spin_unlock_irqrestore(&stopper->lock, flags);
- return enabled;
- }
-
- @@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
- struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
- int err;
- retry:
- - spin_lock_irq(&stopper1->lock);
- - spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
- + raw_spin_lock_irq(&stopper1->lock);
- + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
-
- err = -ENOENT;
- if (!stopper1->enabled || !stopper2->enabled)
- @@ -255,8 +255,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
- __cpu_stop_queue_work(stopper1, work1);
- __cpu_stop_queue_work(stopper2, work2);
- unlock:
- - spin_unlock(&stopper2->lock);
- - spin_unlock_irq(&stopper1->lock);
- + raw_spin_unlock(&stopper2->lock);
- + raw_spin_unlock_irq(&stopper1->lock);
-
- if (unlikely(err == -EDEADLK)) {
- while (stop_cpus_in_progress)
- @@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu)
- unsigned long flags;
- int run;
-
- - spin_lock_irqsave(&stopper->lock, flags);
- + raw_spin_lock_irqsave(&stopper->lock, flags);
- run = !list_empty(&stopper->works);
- - spin_unlock_irqrestore(&stopper->lock, flags);
- + raw_spin_unlock_irqrestore(&stopper->lock, flags);
- return run;
- }
-
- @@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu)
-
- repeat:
- work = NULL;
- - spin_lock_irq(&stopper->lock);
- + raw_spin_lock_irq(&stopper->lock);
- if (!list_empty(&stopper->works)) {
- work = list_first_entry(&stopper->works,
- struct cpu_stop_work, list);
- list_del_init(&work->list);
- }
- - spin_unlock_irq(&stopper->lock);
- + raw_spin_unlock_irq(&stopper->lock);
-
- if (work) {
- cpu_stop_fn_t fn = work->fn;
- @@ -475,6 +475,8 @@ static void cpu_stopper_thread(unsigned int cpu)
- struct cpu_stop_done *done = work->done;
- int ret;
-
- + /* XXX */
- +
- /* cpu stop callbacks must not sleep, make in_atomic() == T */
- preempt_count_inc();
- ret = fn(arg);
- @@ -541,7 +543,7 @@ static int __init cpu_stop_init(void)
- for_each_possible_cpu(cpu) {
- struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
- - spin_lock_init(&stopper->lock);
- + raw_spin_lock_init(&stopper->lock);
- INIT_LIST_HEAD(&stopper->works);
- }
-
- diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
- index eeb7f2f5698d..369203af6406 100644
- --- a/kernel/time/hrtimer.c
- +++ b/kernel/time/hrtimer.c
- @@ -53,6 +53,7 @@
- #include <asm/uaccess.h>
-
- #include <trace/events/timer.h>
- +#include <trace/events/hist.h>
-
- #include "tick-internal.h"
-
- @@ -693,6 +694,29 @@ static void hrtimer_switch_to_hres(void)
- retrigger_next_event(NULL);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +
- +static struct swork_event clock_set_delay_work;
- +
- +static void run_clock_set_delay(struct swork_event *event)
- +{
- + clock_was_set();
- +}
- +
- +void clock_was_set_delayed(void)
- +{
- + swork_queue(&clock_set_delay_work);
- +}
- +
- +static __init int create_clock_set_delay_thread(void)
- +{
- + WARN_ON(swork_get());
- + INIT_SWORK(&clock_set_delay_work, run_clock_set_delay);
- + return 0;
- +}
- +early_initcall(create_clock_set_delay_thread);
- +#else /* PREEMPT_RT_FULL */
- +
- static void clock_was_set_work(struct work_struct *work)
- {
- clock_was_set();
- @@ -708,6 +732,7 @@ void clock_was_set_delayed(void)
- {
- schedule_work(&hrtimer_work);
- }
- +#endif
-
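- 
- The swork indirection above exists because clock_was_set() takes sleeping locks, which the calling context on RT may not. The underlying shape is a plain queue-to-worker handoff; a pthread sketch of the same pattern (illustrative names, not the swork API):
- 
-     #include <pthread.h>
-     #include <stdio.h>
- 
-     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
-     static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-     static int queued;
- 
-     static void clock_was_set(void) { puts("clock_was_set() in thread context"); }
- 
-     /* Stand-in for the swork/kworker thread. */
-     static void *worker(void *arg)
-     {
-         (void)arg;
-         pthread_mutex_lock(&lock);
-         while (!queued)
-             pthread_cond_wait(&cond, &lock);
-         queued = 0;
-         pthread_mutex_unlock(&lock);
-         clock_was_set();    /* sleeping locks are fine here */
-         return NULL;
-     }
- 
-     /* Stand-in for clock_was_set_delayed(): just queue and signal. */
-     static void clock_was_set_delayed(void)
-     {
-         pthread_mutex_lock(&lock);
-         queued = 1;
-         pthread_cond_signal(&cond);
-         pthread_mutex_unlock(&lock);
-     }
- 
-     int main(void)
-     {
-         pthread_t t;
- 
-         pthread_create(&t, NULL, worker, NULL);
-         clock_was_set_delayed();
-         pthread_join(t, NULL);
-         return 0;
-     }
- 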
- #else
-
- @@ -717,11 +742,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
- static inline void hrtimer_switch_to_hres(void) { }
- static inline void
- hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
- -static inline int hrtimer_reprogram(struct hrtimer *timer,
- - struct hrtimer_clock_base *base)
- -{
- - return 0;
- -}
- +static inline void hrtimer_reprogram(struct hrtimer *timer,
- + struct hrtimer_clock_base *base) { }
- static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
- static inline void retrigger_next_event(void *arg) { }
-
- @@ -853,6 +875,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
- }
- EXPORT_SYMBOL_GPL(hrtimer_forward);
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
- +
- +/**
- + * hrtimer_wait_for_timer - Wait for a running timer
- + *
- + * @timer: timer to wait for
- + *
- + * The function waits on the waitqueue of the timer base in case
- + * the timer's callback function is currently executing. The
- + * waitqueue is woken up after the timer callback function has
- + * finished execution.
- + */
- +void hrtimer_wait_for_timer(const struct hrtimer *timer)
- +{
- + struct hrtimer_clock_base *base = timer->base;
- +
- + if (base && base->cpu_base && !timer->irqsafe)
- + wait_event(base->cpu_base->wait,
- + !(hrtimer_callback_running(timer)));
- +}
- +
- +#else
- +# define wake_up_timer_waiters(b) do { } while (0)
- +#endif
- +
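- 
- hrtimer_wait_for_timer() is a classic wait-for-flag handshake: sleep on the base's waitqueue until the callback stops running, woken by wake_up_timer_waiters() at the end of hrtimer_rt_run_pending(). A condition-variable model of the same handshake:
- 
-     #include <pthread.h>
-     #include <stdio.h>
-     #include <unistd.h>
- 
-     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
-     static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
-     static int callback_running = 1;
- 
-     /* Models hrtimer_wait_for_timer(): sleep until the handler is done. */
-     static void wait_for_timer(void)
-     {
-         pthread_mutex_lock(&lock);
-         while (callback_running)
-             pthread_cond_wait(&wait_q, &lock);
-         pthread_mutex_unlock(&lock);
-     }
- 
-     /* Models the softirq thread finishing the callback and waking waiters. */
-     static void *softirq_thread(void *arg)
-     {
-         (void)arg;
-         usleep(1000);   /* the callback runs for a while */
-         pthread_mutex_lock(&lock);
-         callback_running = 0;
-         pthread_cond_broadcast(&wait_q);
-         pthread_mutex_unlock(&lock);
-         return NULL;
-     }
- 
-     int main(void)
-     {
-         pthread_t t;
- 
-         pthread_create(&t, NULL, softirq_thread, NULL);
-         wait_for_timer();
-         puts("timer callback finished; safe to proceed");
-         pthread_join(t, NULL);
-         return 0;
-     }
- 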
- /*
- * enqueue_hrtimer - internal function to (re)start a timer
- *
- @@ -894,6 +942,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
- if (!(state & HRTIMER_STATE_ENQUEUED))
- return;
-
- + if (unlikely(!list_empty(&timer->cb_entry))) {
- + list_del_init(&timer->cb_entry);
- + return;
- + }
- +
- if (!timerqueue_del(&base->active, &timer->node))
- cpu_base->active_bases &= ~(1 << base->index);
-
- @@ -989,7 +1042,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
- timer_stats_hrtimer_set_start_info(timer);
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + {
- + ktime_t now = new_base->get_time();
-
- + if (ktime_to_ns(tim) < ktime_to_ns(now))
- + timer->praecox = now;
- + else
- + timer->praecox = ktime_set(0, 0);
- + }
- +#endif
- leftmost = enqueue_hrtimer(timer, new_base);
- if (!leftmost)
- goto unlock;
- @@ -1061,7 +1123,7 @@ int hrtimer_cancel(struct hrtimer *timer)
-
- if (ret >= 0)
- return ret;
- - cpu_relax();
- + hrtimer_wait_for_timer(timer);
- }
- }
- EXPORT_SYMBOL_GPL(hrtimer_cancel);
- @@ -1137,6 +1199,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
-
- base = hrtimer_clockid_to_base(clock_id);
- timer->base = &cpu_base->clock_base[base];
- + INIT_LIST_HEAD(&timer->cb_entry);
- timerqueue_init(&timer->node);
-
- #ifdef CONFIG_TIMER_STATS
- @@ -1177,6 +1240,7 @@ bool hrtimer_active(const struct hrtimer *timer)
- seq = raw_read_seqcount_begin(&cpu_base->seq);
-
- if (timer->state != HRTIMER_STATE_INACTIVE ||
- + cpu_base->running_soft == timer ||
- cpu_base->running == timer)
- return true;
-
- @@ -1275,10 +1339,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
- cpu_base->running = NULL;
- }
-
- -static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
- + struct hrtimer_clock_base *base)
- +{
- + int leftmost;
- +
- + if (restart != HRTIMER_NORESTART &&
- + !(timer->state & HRTIMER_STATE_ENQUEUED)) {
- +
- + leftmost = enqueue_hrtimer(timer, base);
- + if (!leftmost)
- + return;
- +#ifdef CONFIG_HIGH_RES_TIMERS
- + if (!hrtimer_is_hres_active(timer)) {
- + /*
- + * Kick to reschedule the next tick to handle the new timer
- + * on dynticks target.
- + */
- + if (base->cpu_base->nohz_active)
- + wake_up_nohz_cpu(base->cpu_base->cpu);
- + } else {
- +
- + hrtimer_reprogram(timer, base);
- + }
- +#endif
- + }
- +}
- +
- +/*
- + * The changes in mainline which removed the callback modes from
- + * hrtimer are not yet working with -rt. The non wakeup_process()
- + * based callbacks which involve sleeping locks need to be treated
- + * seperately.
- + */
- +static void hrtimer_rt_run_pending(void)
- +{
- + enum hrtimer_restart (*fn)(struct hrtimer *);
- + struct hrtimer_cpu_base *cpu_base;
- + struct hrtimer_clock_base *base;
- + struct hrtimer *timer;
- + int index, restart;
- +
- + local_irq_disable();
- + cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
- +
- + raw_spin_lock(&cpu_base->lock);
- +
- + for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
- + base = &cpu_base->clock_base[index];
- +
- + while (!list_empty(&base->expired)) {
- + timer = list_first_entry(&base->expired,
- + struct hrtimer, cb_entry);
- +
- + /*
- + * Same as the __run_hrtimer() function above,
- + * except that we run with interrupts enabled.
- + */
- + debug_deactivate(timer);
- + cpu_base->running_soft = timer;
- + raw_write_seqcount_barrier(&cpu_base->seq);
- +
- + __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
- + timer_stats_account_hrtimer(timer);
- + fn = timer->function;
- +
- + raw_spin_unlock_irq(&cpu_base->lock);
- + restart = fn(timer);
- + raw_spin_lock_irq(&cpu_base->lock);
- +
- + hrtimer_rt_reprogram(restart, timer, base);
- + raw_write_seqcount_barrier(&cpu_base->seq);
- +
- + WARN_ON_ONCE(cpu_base->running_soft != timer);
- + cpu_base->running_soft = NULL;
- + }
- + }
- +
- + raw_spin_unlock_irq(&cpu_base->lock);
- +
- + wake_up_timer_waiters(cpu_base);
- +}
- +
- +static int hrtimer_rt_defer(struct hrtimer *timer)
- +{
- + if (timer->irqsafe)
- + return 0;
- +
- + __remove_hrtimer(timer, timer->base, timer->state, 0);
- + list_add_tail(&timer->cb_entry, &timer->base->expired);
- + return 1;
- +}
- +
- +#else
- +
- +static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
- +
- +#endif
- +
- +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
- +
- +static int __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- {
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
- + int raise = 0;
-
- for (; active; base++, active >>= 1) {
- struct timerqueue_node *node;
- @@ -1294,6 +1460,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
-
- timer = container_of(node, struct hrtimer, node);
-
- + trace_hrtimer_interrupt(raw_smp_processor_id(),
- + ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
- + timer->praecox : hrtimer_get_expires(timer),
- + basenow)),
- + current,
- + timer->function == hrtimer_wakeup ?
- + container_of(timer, struct hrtimer_sleeper,
- + timer)->task : NULL);
- +
- /*
- * The immediate goal for using the softexpires is
- * minimizing wakeups, not running timers at the
- @@ -1309,9 +1484,13 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
- break;
-
- - __run_hrtimer(cpu_base, base, timer, &basenow);
- + if (!hrtimer_rt_defer(timer))
- + __run_hrtimer(cpu_base, base, timer, &basenow);
- + else
- + raise = 1;
- }
- }
- + return raise;
- }
-
- #ifdef CONFIG_HIGH_RES_TIMERS
- @@ -1325,6 +1504,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t expires_next, now, entry_time, delta;
- int retries = 0;
- + int raise;
-
- BUG_ON(!cpu_base->hres_active);
- cpu_base->nr_events++;
- @@ -1343,7 +1523,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
- */
- cpu_base->expires_next.tv64 = KTIME_MAX;
-
- - __hrtimer_run_queues(cpu_base, now);
- + raise = __hrtimer_run_queues(cpu_base, now);
-
- /* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
- @@ -1354,6 +1534,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
- cpu_base->expires_next = expires_next;
- cpu_base->in_hrtirq = 0;
- raw_spin_unlock(&cpu_base->lock);
- + if (raise)
- + raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
- /* Reprogramming necessary ? */
- if (!tick_program_event(expires_next, 0)) {
- @@ -1433,6 +1615,7 @@ void hrtimer_run_queues(void)
- {
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t now;
- + int raise;
-
- if (__hrtimer_hres_active(cpu_base))
- return;
- @@ -1451,8 +1634,10 @@ void hrtimer_run_queues(void)
-
- raw_spin_lock(&cpu_base->lock);
- now = hrtimer_update_base(cpu_base);
- - __hrtimer_run_queues(cpu_base, now);
- + raise = __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
- + if (raise)
- + raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- }
-
- /*
- @@ -1474,16 +1659,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
- void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
- {
- sl->timer.function = hrtimer_wakeup;
- + sl->timer.irqsafe = 1;
- sl->task = task;
- }
- EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-
- -static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
- +static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
- + unsigned long state)
- {
- hrtimer_init_sleeper(t, current);
-
- do {
- - set_current_state(TASK_INTERRUPTIBLE);
- + set_current_state(state);
- hrtimer_start_expires(&t->timer, mode);
-
- if (likely(t->task))
- @@ -1525,7 +1712,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
- HRTIMER_MODE_ABS);
- hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
-
- - if (do_nanosleep(&t, HRTIMER_MODE_ABS))
- + /* cpu_chill() does not care about restart state. */
- + if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
- goto out;
-
- rmtp = restart->nanosleep.rmtp;
- @@ -1542,8 +1730,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
- return ret;
- }
-
- -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- - const enum hrtimer_mode mode, const clockid_t clockid)
- +static long
- +__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- + const enum hrtimer_mode mode, const clockid_t clockid,
- + unsigned long state)
- {
- struct restart_block *restart;
- struct hrtimer_sleeper t;
- @@ -1556,7 +1746,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
-
- hrtimer_init_on_stack(&t.timer, clockid, mode);
- hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
- - if (do_nanosleep(&t, mode))
- + if (do_nanosleep(&t, mode, state))
- goto out;
-
- /* Absolute timers do not update the rmtp value and restart: */
- @@ -1583,6 +1773,12 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- return ret;
- }
-
- +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- + const enum hrtimer_mode mode, const clockid_t clockid)
- +{
- + return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
- +}
- +
- SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
- struct timespec __user *, rmtp)
- {
- @@ -1597,6 +1793,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
- return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +/*
- + * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
- + */
- +void cpu_chill(void)
- +{
- + struct timespec tu = {
- + .tv_nsec = NSEC_PER_MSEC,
- + };
- + unsigned int freeze_flag = current->flags & PF_NOFREEZE;
- +
- + current->flags |= PF_NOFREEZE;
- + __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
- + TASK_UNINTERRUPTIBLE);
- + if (!freeze_flag)
- + current->flags &= ~PF_NOFREEZE;
- +}
- +EXPORT_SYMBOL(cpu_chill);
- +#endif
- +
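- 
- cpu_chill() exists because busy-waiting on RT can livelock: the spinner may outrank the lock holder it is waiting for, so it must genuinely sleep. Callers replace cpu_relax()-style retry loops with this 1 ms sleep. A userspace analogue of such a retry loop, with nanosleep() standing in for cpu_chill():
- 
-     #include <pthread.h>
-     #include <stdio.h>
-     #include <time.h>
- 
-     static pthread_mutex_t res = PTHREAD_MUTEX_INITIALIZER;
- 
-     /* Sleep 1 ms instead of spinning, so the holder can make progress. */
-     static void chill(void)
-     {
-         struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };
- 
-         nanosleep(&ts, NULL);
-     }
- 
-     int main(void)
-     {
-         int tries = 0;
- 
-         /* the mutex is free here, so this succeeds at once; in anger
-          * the loop would contend with another thread */
-         while (pthread_mutex_trylock(&res) != 0) {
-             chill();    /* where non-RT code would cpu_relax() */
-             tries++;
-         }
-         printf("acquired after %d retries\n", tries);
-         pthread_mutex_unlock(&res);
-         return 0;
-     }
- 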
- /*
- * Functions related to boot-time initialization:
- */
- @@ -1608,16 +1824,20 @@ int hrtimers_prepare_cpu(unsigned int cpu)
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- cpu_base->clock_base[i].cpu_base = cpu_base;
- timerqueue_init_head(&cpu_base->clock_base[i].active);
- + INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
- }
-
- cpu_base->cpu = cpu;
- hrtimer_init_hres(cpu_base);
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + init_waitqueue_head(&cpu_base->wait);
- +#endif
- return 0;
- }
-
- #ifdef CONFIG_HOTPLUG_CPU
-
- -static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- +static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base)
- {
- struct hrtimer *timer;
- @@ -1645,12 +1865,21 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- */
- enqueue_hrtimer(timer, new_base);
- }
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + list_splice_tail(&old_base->expired, &new_base->expired);
- + /*
- + * Tell the caller to raise HRTIMER_SOFTIRQ. We can't safely
- + * acquire ktimersoftd->pi_lock while the base lock is held.
- + */
- + return !list_empty(&new_base->expired);
- +#endif
- + return 0;
- }
-
- int hrtimers_dead_cpu(unsigned int scpu)
- {
- struct hrtimer_cpu_base *old_base, *new_base;
- - int i;
- + int i, raise = 0;
-
- BUG_ON(cpu_online(scpu));
- tick_cancel_sched_timer(scpu);
- @@ -1666,13 +1895,16 @@ int hrtimers_dead_cpu(unsigned int scpu)
- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- - migrate_hrtimer_list(&old_base->clock_base[i],
- - &new_base->clock_base[i]);
- + raise |= migrate_hrtimer_list(&old_base->clock_base[i],
- + &new_base->clock_base[i]);
- }
-
- raw_spin_unlock(&old_base->lock);
- raw_spin_unlock(&new_base->lock);
-
- + if (raise)
- + raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- +
- /* Check, if we got expired work to do */
- __hrtimer_peek_ahead_timers();
- local_irq_enable();
- @@ -1681,9 +1913,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
-
- #endif /* CONFIG_HOTPLUG_CPU */
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +
- +static void run_hrtimer_softirq(struct softirq_action *h)
- +{
- + hrtimer_rt_run_pending();
- +}
- +
- +static void hrtimers_open_softirq(void)
- +{
- + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
- +}
- +
- +#else
- +static void hrtimers_open_softirq(void) { }
- +#endif
- +
- void __init hrtimers_init(void)
- {
- hrtimers_prepare_cpu(smp_processor_id());
- + hrtimers_open_softirq();
- }
-
- /**
- diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
- index 1d5c7204ddc9..184de6751180 100644
- --- a/kernel/time/itimer.c
- +++ b/kernel/time/itimer.c
- @@ -213,6 +213,7 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
- /* We are sharing ->siglock with it_real_fn() */
- if (hrtimer_try_to_cancel(timer) < 0) {
- spin_unlock_irq(&tsk->sighand->siglock);
- + hrtimer_wait_for_timer(&tsk->signal->real_timer);
- goto again;
- }
- expires = timeval_to_ktime(value->it_value);
- diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
- index 555e21f7b966..a5d6435fabbb 100644
- --- a/kernel/time/jiffies.c
- +++ b/kernel/time/jiffies.c
- @@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = {
- .max_cycles = 10,
- };
-
- -__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
- +__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
- +__cacheline_aligned_in_smp seqcount_t jiffies_seq;
-
- #if (BITS_PER_LONG < 64)
- u64 get_jiffies_64(void)
- @@ -83,9 +84,9 @@ u64 get_jiffies_64(void)
- u64 ret;
-
- do {
- - seq = read_seqbegin(&jiffies_lock);
- + seq = read_seqcount_begin(&jiffies_seq);
- ret = jiffies_64;
- - } while (read_seqretry(&jiffies_lock, seq));
- + } while (read_seqcount_retry(&jiffies_seq, seq));
- return ret;
- }
- EXPORT_SYMBOL(get_jiffies_64);
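- 
- The seqlock is split here into a raw spinlock for writers plus a bare seqcount so the write side can stay a raw lock on RT while the lockless read loop keeps its shape. That read loop - retry while the count is odd or has changed - in standalone C with C11 atomics (the memory-barrier details that seqcount_t supplies are glossed over; this sketch is exercised single-threaded):
- 
-     #include <stdatomic.h>
-     #include <stdio.h>
- 
-     static _Atomic unsigned int seq;
-     static unsigned long long value;    /* the protected data */
- 
-     /* Writer: bump to odd, update, bump back to even. */
-     static void write_value(unsigned long long v)
-     {
-         atomic_fetch_add(&seq, 1);
-         value = v;
-         atomic_fetch_add(&seq, 1);
-     }
- 
-     /* Reader: same loop shape as get_jiffies_64(). */
-     static unsigned long long read_value(void)
-     {
-         unsigned int s;
-         unsigned long long v;
- 
-         do {
-             s = atomic_load(&seq);
-             v = value;
-         } while ((s & 1) || s != atomic_load(&seq));
-         return v;
-     }
- 
-     int main(void)
-     {
-         write_value(0x100000001ULL);
-         printf("read %llx\n", read_value());
-         return 0;
-     }
- 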
- diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
- index 6df8927c58a5..05b7391bf9bd 100644
- --- a/kernel/time/ntp.c
- +++ b/kernel/time/ntp.c
- @@ -17,6 +17,7 @@
- #include <linux/module.h>
- #include <linux/rtc.h>
- #include <linux/math64.h>
- +#include <linux/swork.h>
-
- #include "ntp_internal.h"
- #include "timekeeping_internal.h"
- @@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_struct *work)
- &sync_cmos_work, timespec64_to_jiffies(&next));
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +
- +static void run_clock_set_delay(struct swork_event *event)
- +{
- + queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
- +}
- +
- +static struct swork_event ntp_cmos_swork;
- +
- +void ntp_notify_cmos_timer(void)
- +{
- + swork_queue(&ntp_cmos_swork);
- +}
- +
- +static __init int create_cmos_delay_thread(void)
- +{
- + WARN_ON(swork_get());
- + INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay);
- + return 0;
- +}
- +early_initcall(create_cmos_delay_thread);
- +
- +#else
- +
- void ntp_notify_cmos_timer(void)
- {
- queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
- }
- +#endif /* CONFIG_PREEMPT_RT_FULL */
-
- #else
- void ntp_notify_cmos_timer(void) { }
- diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
- index 39008d78927a..633f4eaca9e7 100644
- --- a/kernel/time/posix-cpu-timers.c
- +++ b/kernel/time/posix-cpu-timers.c
- @@ -3,6 +3,7 @@
- */
-
- #include <linux/sched.h>
- +#include <linux/sched/rt.h>
- #include <linux/posix-timers.h>
- #include <linux/errno.h>
- #include <linux/math64.h>
- @@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
- /*
- * Disarm any old timer after extracting its expiry time.
- */
- - WARN_ON_ONCE(!irqs_disabled());
- + WARN_ON_ONCE_NONRT(!irqs_disabled());
-
- ret = 0;
- old_incr = timer->it.cpu.incr;
- @@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
- /*
- * Now re-arm for the new expiry time.
- */
- - WARN_ON_ONCE(!irqs_disabled());
- + WARN_ON_ONCE_NONRT(!irqs_disabled());
- arm_timer(timer);
- unlock_task_sighand(p, &flags);
-
- @@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
- * already updated our counts. We need to check if any timers fire now.
- * Interrupts are disabled.
- */
- -void run_posix_cpu_timers(struct task_struct *tsk)
- +static void __run_posix_cpu_timers(struct task_struct *tsk)
- {
- LIST_HEAD(firing);
- struct k_itimer *timer, *next;
- unsigned long flags;
-
- - WARN_ON_ONCE(!irqs_disabled());
- + WARN_ON_ONCE_NONRT(!irqs_disabled());
-
- /*
- * The fast path checks that there are no expired thread or thread
- @@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
- }
- }
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +#include <linux/kthread.h>
- +#include <linux/cpu.h>
- +DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
- +DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
- +
- +static int posix_cpu_timers_thread(void *data)
- +{
- + int cpu = (long)data;
- +
- + BUG_ON(per_cpu(posix_timer_task,cpu) != current);
- +
- + while (!kthread_should_stop()) {
- + struct task_struct *tsk = NULL;
- + struct task_struct *next = NULL;
- +
- + if (cpu_is_offline(cpu))
- + goto wait_to_die;
- +
- + /* grab task list */
- + raw_local_irq_disable();
- + tsk = per_cpu(posix_timer_tasklist, cpu);
- + per_cpu(posix_timer_tasklist, cpu) = NULL;
- + raw_local_irq_enable();
- +
- + /* it's possible the list is empty; go back to sleep */
- + if (!tsk) {
- + set_current_state(TASK_INTERRUPTIBLE);
- + schedule();
- + __set_current_state(TASK_RUNNING);
- + continue;
- + }
- +
- + /* Process task list */
- + while (1) {
- + /* save next */
- + next = tsk->posix_timer_list;
- +
- + /* run the task's timers, clear its pointer and
- + * drop our reference
- + */
- + __run_posix_cpu_timers(tsk);
- + tsk->posix_timer_list = NULL;
- + put_task_struct(tsk);
- +
- + /* check if this is the last on the list */
- + if (next == tsk)
- + break;
- + tsk = next;
- + }
- + }
- + return 0;
- +
- +wait_to_die:
- + /* Wait for kthread_stop */
- + set_current_state(TASK_INTERRUPTIBLE);
- + while (!kthread_should_stop()) {
- + schedule();
- + set_current_state(TASK_INTERRUPTIBLE);
- + }
- + __set_current_state(TASK_RUNNING);
- + return 0;
- +}
- +
- +static inline int __fastpath_timer_check(struct task_struct *tsk)
- +{
- + /* tsk == current, ensure it is safe to use ->signal/sighand */
- + if (unlikely(tsk->exit_state))
- + return 0;
- +
- + if (!task_cputime_zero(&tsk->cputime_expires))
- + return 1;
- +
- + if (!task_cputime_zero(&tsk->signal->cputime_expires))
- + return 1;
- +
- + return 0;
- +}
- +
- +void run_posix_cpu_timers(struct task_struct *tsk)
- +{
- + unsigned long cpu = smp_processor_id();
- + struct task_struct *tasklist;
- +
- + BUG_ON(!irqs_disabled());
- + if(!per_cpu(posix_timer_task, cpu))
- + return;
- + /* get per-cpu references */
- + tasklist = per_cpu(posix_timer_tasklist, cpu);
- +
- + /* check to see if we're already queued */
- + if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
- + get_task_struct(tsk);
- + if (tasklist) {
- + tsk->posix_timer_list = tasklist;
- + } else {
- + /*
- + * The list is terminated by a self-pointing
- + * task_struct
- + */
- + tsk->posix_timer_list = tsk;
- + }
- + per_cpu(posix_timer_tasklist, cpu) = tsk;
- +
- + wake_up_process(per_cpu(posix_timer_task, cpu));
- + }
- +}
- +
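- 
- The tasklist above is a singly linked list terminated by a self-pointing task_struct, so NULL in posix_timer_list can mean "not queued" while the last element still carries a non-NULL link. A small standalone model of that sentinel convention:
- 
-     #include <stdio.h>
- 
-     struct task {
-         const char *name;
-         struct task *posix_timer_list;  /* NULL = not queued */
-     };
- 
-     static struct task *tasklist;
- 
-     static void enqueue(struct task *t)
-     {
-         if (t->posix_timer_list)        /* already queued */
-             return;
-         t->posix_timer_list = tasklist ? tasklist : t;  /* self = tail */
-         tasklist = t;
-     }
- 
-     static void drain(void)
-     {
-         struct task *t = tasklist, *next;
- 
-         tasklist = NULL;
-         while (t) {
-             next = t->posix_timer_list;
-             printf("running timers for %s\n", t->name);
-             t->posix_timer_list = NULL;
-             if (next == t)              /* self-pointer: end of list */
-                 break;
-             t = next;
-         }
-     }
- 
-     int main(void)
-     {
-         struct task a = { "a", NULL }, b = { "b", NULL };
- 
-         enqueue(&a);
-         enqueue(&a);    /* duplicate queueing is ignored */
-         enqueue(&b);
-         drain();        /* prints b, then a */
-         return 0;
-     }
- 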
- +/*
- + * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
- + * Here we can start up the necessary migration thread for the new CPU.
- + */
- +static int posix_cpu_thread_call(struct notifier_block *nfb,
- + unsigned long action, void *hcpu)
- +{
- + int cpu = (long)hcpu;
- + struct task_struct *p;
- + struct sched_param param;
- +
- + switch (action) {
- + case CPU_UP_PREPARE:
- + p = kthread_create(posix_cpu_timers_thread, hcpu,
- + "posixcputmr/%d",cpu);
- + if (IS_ERR(p))
- + return NOTIFY_BAD;
- + p->flags |= PF_NOFREEZE;
- + kthread_bind(p, cpu);
- + /* Must be high prio to avoid getting starved */
- + param.sched_priority = MAX_RT_PRIO-1;
- + sched_setscheduler(p, SCHED_FIFO, &param);
- + per_cpu(posix_timer_task,cpu) = p;
- + break;
- + case CPU_ONLINE:
- + /* Strictly unnecessary, as the first user will wake it. */
- + wake_up_process(per_cpu(posix_timer_task,cpu));
- + break;
- +#ifdef CONFIG_HOTPLUG_CPU
- + case CPU_UP_CANCELED:
- + /* Unbind it from the offline CPU so it can run. Fall through. */
- + kthread_bind(per_cpu(posix_timer_task, cpu),
- + cpumask_any(cpu_online_mask));
- + kthread_stop(per_cpu(posix_timer_task,cpu));
- + per_cpu(posix_timer_task,cpu) = NULL;
- + break;
- + case CPU_DEAD:
- + kthread_stop(per_cpu(posix_timer_task,cpu));
- + per_cpu(posix_timer_task,cpu) = NULL;
- + break;
- +#endif
- + }
- + return NOTIFY_OK;
- +}
- +
- +/* Register at highest priority so that task migration (migrate_all_tasks)
- + * happens before everything else.
- + */
- +static struct notifier_block posix_cpu_thread_notifier = {
- + .notifier_call = posix_cpu_thread_call,
- + .priority = 10
- +};
- +
- +static int __init posix_cpu_thread_init(void)
- +{
- + void *hcpu = (void *)(long)smp_processor_id();
- + /* Start one for boot CPU. */
- + unsigned long cpu;
- +
- + /* init the per-cpu posix_timer_tasklets */
- + for_each_possible_cpu(cpu)
- + per_cpu(posix_timer_tasklist, cpu) = NULL;
- +
- + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
- + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
- + register_cpu_notifier(&posix_cpu_thread_notifier);
- + return 0;
- +}
- +early_initcall(posix_cpu_thread_init);
- +#else /* CONFIG_PREEMPT_RT_BASE */
- +void run_posix_cpu_timers(struct task_struct *tsk)
- +{
- + __run_posix_cpu_timers(tsk);
- +}
- +#endif /* CONFIG_PREEMPT_RT_BASE */
- +
- /*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
- diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
- index f2826c35e918..464a98155a0e 100644
- --- a/kernel/time/posix-timers.c
- +++ b/kernel/time/posix-timers.c
- @@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
- static struct pid *good_sigevent(sigevent_t * event)
- {
- struct task_struct *rtn = current->group_leader;
- + int sig = event->sigev_signo;
-
- if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
- (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
- @@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigevent_t * event)
- return NULL;
-
- if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
- - ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
- + (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
- + sig_kernel_coredump(sig)))
- return NULL;
-
- return task_pid(rtn);
- @@ -826,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
- return overrun;
- }
-
- +/*
- + * Protected by RCU!
- + */
- +static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
- +{
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (kc->timer_set == common_timer_set)
- + hrtimer_wait_for_timer(&timr->it.real.timer);
- + else
- + /* FIXME: Whacky hack for posix-cpu-timers */
- + schedule_timeout(1);
- +#endif
- +}
- +
- /* Set a POSIX.1b interval timer. */
- /* timr->it_lock is taken. */
- static int
- @@ -903,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
- if (!timr)
- return -EINVAL;
-
- + rcu_read_lock();
- kc = clockid_to_kclock(timr->it_clock);
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
- @@ -911,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-
- unlock_timer(timr, flag);
- if (error == TIMER_RETRY) {
- + timer_wait_for_callback(kc, timr);
- rtn = NULL; // We already got the old time...
- + rcu_read_unlock();
- goto retry;
- }
- + rcu_read_unlock();
-
- if (old_setting && !error &&
- copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
- @@ -951,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
- if (!timer)
- return -EINVAL;
-
- + rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
- + timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
- + timer);
- + rcu_read_unlock();
- goto retry_delete;
- }
- + rcu_read_unlock();
-
- spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
- @@ -980,8 +1005,18 @@ static void itimer_delete(struct k_itimer *timer)
- retry_delete:
- spin_lock_irqsave(&timer->it_lock, flags);
-
- + /* On RT we can race with a deletion */
- + if (!timer->it_signal) {
- + unlock_timer(timer, flags);
- + return;
- + }
- +
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- + rcu_read_lock();
- unlock_timer(timer, flags);
- + timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
- + timer);
- + rcu_read_unlock();
- goto retry_delete;
- }
- list_del(&timer->list);
- diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
- index 690b797f522e..fe8ba1619879 100644
- --- a/kernel/time/tick-broadcast-hrtimer.c
- +++ b/kernel/time/tick-broadcast-hrtimer.c
- @@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
- {
- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- bctimer.function = bc_handler;
- + bctimer.irqsafe = true;
- clockevents_register_device(&ce_broadcast_hrtimer);
- }
- diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
- index 4fcd99e12aa0..5a47f2e98faf 100644
- --- a/kernel/time/tick-common.c
- +++ b/kernel/time/tick-common.c
- @@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
- static void tick_periodic(int cpu)
- {
- if (tick_do_timer_cpu == cpu) {
- - write_seqlock(&jiffies_lock);
- + raw_spin_lock(&jiffies_lock);
- + write_seqcount_begin(&jiffies_seq);
-
- /* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
-
- do_timer(1);
- - write_sequnlock(&jiffies_lock);
- + write_seqcount_end(&jiffies_seq);
- + raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
-
- @@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
- ktime_t next;
-
- do {
- - seq = read_seqbegin(&jiffies_lock);
- + seq = read_seqcount_begin(&jiffies_seq);
- next = tick_next_period;
- - } while (read_seqretry(&jiffies_lock, seq));
- + } while (read_seqcount_retry(&jiffies_seq, seq));
-
- clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
-
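
The tick-common.c hunks above are part of a tree-wide conversion: the jiffies seqlock is split into a raw spinlock, which serializes writers even on RT, plus a bare seqcount that readers poll locklessly. A userspace sketch of that reader-retry protocol, using C11 atomics and a pthread mutex instead of the kernel primitives (an illustration of the pattern, not the kernel implementation):

/*
 * Pattern sketch only (C11 atomics, not the kernel seqcount API):
 * the writer takes the lock and bumps the sequence to odd before and
 * to even after the update; readers retry while the sequence is odd
 * or has changed underneath them.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t jlock = PTHREAD_MUTEX_INITIALIZER; /* jiffies_lock */
static atomic_uint jseq;                                  /* jiffies_seq */
static atomic_ullong fake_jiffies;

static void writer_tick(void)
{
	pthread_mutex_lock(&jlock);
	atomic_fetch_add(&jseq, 1);		/* write_seqcount_begin() */
	atomic_fetch_add(&fake_jiffies, 1);	/* do_timer(1) */
	atomic_fetch_add(&jseq, 1);		/* write_seqcount_end() */
	pthread_mutex_unlock(&jlock);
}

static unsigned long long reader_get(void)
{
	unsigned int s;
	unsigned long long v;

	do {
		s = atomic_load(&jseq);		/* read_seqcount_begin() */
		v = atomic_load(&fake_jiffies);
	} while ((s & 1) || s != atomic_load(&jseq)); /* ..._retry() */
	return v;
}

int main(void)
{
	writer_tick();
	printf("%llu\n", reader_get());
	return 0;
}

The point of the split is that on PREEMPT_RT a seqlock's writer side would become a sleeping lock; keeping writers on a raw spinlock preserves the old semantics while readers stay lock-free.
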
- diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
- index dae1a45be504..c573b1a848b6 100644
- --- a/kernel/time/tick-sched.c
- +++ b/kernel/time/tick-sched.c
- @@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
- return;
-
- /* Reevaluate with jiffies_lock held */
- - write_seqlock(&jiffies_lock);
- + raw_spin_lock(&jiffies_lock);
- + write_seqcount_begin(&jiffies_seq);
-
- delta = ktime_sub(now, last_jiffies_update);
- if (delta.tv64 >= tick_period.tv64) {
- @@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now)
- /* Keep the tick_next_period variable up to date */
- tick_next_period = ktime_add(last_jiffies_update, tick_period);
- } else {
- - write_sequnlock(&jiffies_lock);
- + write_seqcount_end(&jiffies_seq);
- + raw_spin_unlock(&jiffies_lock);
- return;
- }
- - write_sequnlock(&jiffies_lock);
- + write_seqcount_end(&jiffies_seq);
- + raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
-
- @@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void)
- {
- ktime_t period;
-
- - write_seqlock(&jiffies_lock);
- + raw_spin_lock(&jiffies_lock);
- + write_seqcount_begin(&jiffies_seq);
- /* Did we start the jiffies update yet ? */
- if (last_jiffies_update.tv64 == 0)
- last_jiffies_update = tick_next_period;
- period = last_jiffies_update;
- - write_sequnlock(&jiffies_lock);
- + write_seqcount_end(&jiffies_seq);
- + raw_spin_unlock(&jiffies_lock);
- return period;
- }
-
- @@ -215,6 +220,7 @@ static void nohz_full_kick_func(struct irq_work *work)
-
- static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
- .func = nohz_full_kick_func,
- + .flags = IRQ_WORK_HARD_IRQ,
- };
-
- /*
- @@ -678,10 +684,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
-
- /* Read jiffies and the time when jiffies were updated last */
- do {
- - seq = read_seqbegin(&jiffies_lock);
- + seq = read_seqcount_begin(&jiffies_seq);
- basemono = last_jiffies_update.tv64;
- basejiff = jiffies;
- - } while (read_seqretry(&jiffies_lock, seq));
- + } while (read_seqcount_retry(&jiffies_seq, seq));
- ts->last_jiffies = basejiff;
-
- /*
- @@ -892,14 +898,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
- return false;
-
- if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
- - static int ratelimit;
- -
- - if (ratelimit < 10 &&
- - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
- - pr_warn("NOHZ: local_softirq_pending %02x\n",
- - (unsigned int) local_softirq_pending());
- - ratelimit++;
- - }
- + softirq_check_pending_idle();
- return false;
- }
-
- @@ -1208,6 +1207,7 @@ void tick_setup_sched_timer(void)
- * Emulate tick processing via per-CPU hrtimers:
- */
- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- + ts->sched_timer.irqsafe = 1;
- ts->sched_timer.function = tick_sched_timer;
-
- /* Get the next period (per-CPU) */
- diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
- index d831827d7ab0..76d982c11ac3 100644
- --- a/kernel/time/timekeeping.c
- +++ b/kernel/time/timekeeping.c
- @@ -2348,8 +2348,10 @@ EXPORT_SYMBOL(hardpps);
- */
- void xtime_update(unsigned long ticks)
- {
- - write_seqlock(&jiffies_lock);
- + raw_spin_lock(&jiffies_lock);
- + write_seqcount_begin(&jiffies_seq);
- do_timer(ticks);
- - write_sequnlock(&jiffies_lock);
- + write_seqcount_end(&jiffies_seq);
- + raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
- diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
- index 704f595ce83f..763a3e5121ff 100644
- --- a/kernel/time/timekeeping.h
- +++ b/kernel/time/timekeeping.h
- @@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
- extern void do_timer(unsigned long ticks);
- extern void update_wall_time(void);
-
- -extern seqlock_t jiffies_lock;
- +extern raw_spinlock_t jiffies_lock;
- +extern seqcount_t jiffies_seq;
-
- #define CS_NAME_LEN 32
-
- diff --git a/kernel/time/timer.c b/kernel/time/timer.c
- index e872f7f05e8a..8e75e7442aaa 100644
- --- a/kernel/time/timer.c
- +++ b/kernel/time/timer.c
- @@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
- #endif
-
- struct timer_base {
- - spinlock_t lock;
- + raw_spinlock_t lock;
- struct timer_list *running_timer;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct swait_queue_head wait_for_running_timer;
- +#endif
- unsigned long clk;
- unsigned long next_expiry;
- unsigned int cpu;
- @@ -953,10 +956,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
-
- if (!(tf & TIMER_MIGRATING)) {
- base = get_timer_base(tf);
- - spin_lock_irqsave(&base->lock, *flags);
- + raw_spin_lock_irqsave(&base->lock, *flags);
- if (timer->flags == tf)
- return base;
- - spin_unlock_irqrestore(&base->lock, *flags);
- + raw_spin_unlock_irqrestore(&base->lock, *flags);
- }
- cpu_relax();
- }
- @@ -1033,9 +1036,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
- /* See the comment in lock_timer_base() */
- timer->flags |= TIMER_MIGRATING;
-
- - spin_unlock(&base->lock);
- + raw_spin_unlock(&base->lock);
- base = new_base;
- - spin_lock(&base->lock);
- + raw_spin_lock(&base->lock);
- WRITE_ONCE(timer->flags,
- (timer->flags & ~TIMER_BASEMASK) | base->cpu);
- forward_timer_base(base);
- @@ -1060,7 +1063,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
- }
-
- out_unlock:
- - spin_unlock_irqrestore(&base->lock, flags);
- + raw_spin_unlock_irqrestore(&base->lock, flags);
-
- return ret;
- }
- @@ -1154,9 +1157,9 @@ void add_timer_on(struct timer_list *timer, int cpu)
- if (base != new_base) {
- timer->flags |= TIMER_MIGRATING;
-
- - spin_unlock(&base->lock);
- + raw_spin_unlock(&base->lock);
- base = new_base;
- - spin_lock(&base->lock);
- + raw_spin_lock(&base->lock);
- WRITE_ONCE(timer->flags,
- (timer->flags & ~TIMER_BASEMASK) | cpu);
- }
- @@ -1164,10 +1167,37 @@ void add_timer_on(struct timer_list *timer, int cpu)
-
- debug_activate(timer, timer->expires);
- internal_add_timer(base, timer);
- - spin_unlock_irqrestore(&base->lock, flags);
- + raw_spin_unlock_irqrestore(&base->lock, flags);
- }
- EXPORT_SYMBOL_GPL(add_timer_on);
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +/*
- + * Wait for a running timer
- + */
- +static void wait_for_running_timer(struct timer_list *timer)
- +{
- + struct timer_base *base;
- + u32 tf = timer->flags;
- +
- + if (tf & TIMER_MIGRATING)
- + return;
- +
- + base = get_timer_base(tf);
- + swait_event(base->wait_for_running_timer,
- + base->running_timer != timer);
- +}
- +
- +# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer)
- +#else
- +static inline void wait_for_running_timer(struct timer_list *timer)
- +{
- + cpu_relax();
- +}
- +
- +# define wakeup_timer_waiters(b) do { } while (0)
- +#endif
- +
- /**
- * del_timer - deactivate a timer.
- * @timer: the timer to be deactivated
- @@ -1191,7 +1221,7 @@ int del_timer(struct timer_list *timer)
- if (timer_pending(timer)) {
- base = lock_timer_base(timer, &flags);
- ret = detach_if_pending(timer, base, true);
- - spin_unlock_irqrestore(&base->lock, flags);
- + raw_spin_unlock_irqrestore(&base->lock, flags);
- }
-
- return ret;
- @@ -1219,13 +1249,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
- timer_stats_timer_clear_start_info(timer);
- ret = detach_if_pending(timer, base, true);
- }
- - spin_unlock_irqrestore(&base->lock, flags);
- + raw_spin_unlock_irqrestore(&base->lock, flags);
-
- return ret;
- }
- EXPORT_SYMBOL(try_to_del_timer_sync);
-
- -#ifdef CONFIG_SMP
- +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- /**
- * del_timer_sync - deactivate a timer and wait for the handler to finish.
- * @timer: the timer to be deactivated
- @@ -1285,7 +1315,7 @@ int del_timer_sync(struct timer_list *timer)
- int ret = try_to_del_timer_sync(timer);
- if (ret >= 0)
- return ret;
- - cpu_relax();
- + wait_for_running_timer(timer);
- }
- }
- EXPORT_SYMBOL(del_timer_sync);
- @@ -1350,14 +1380,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
- fn = timer->function;
- data = timer->data;
-
- - if (timer->flags & TIMER_IRQSAFE) {
- - spin_unlock(&base->lock);
- + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
- + timer->flags & TIMER_IRQSAFE) {
- + raw_spin_unlock(&base->lock);
- call_timer_fn(timer, fn, data);
- - spin_lock(&base->lock);
- + base->running_timer = NULL;
- + raw_spin_lock(&base->lock);
- } else {
- - spin_unlock_irq(&base->lock);
- + raw_spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn, data);
- - spin_lock_irq(&base->lock);
- + base->running_timer = NULL;
- + raw_spin_lock_irq(&base->lock);
- }
- }
- }
- @@ -1526,7 +1559,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
- if (cpu_is_offline(smp_processor_id()))
- return expires;
-
- - spin_lock(&base->lock);
- + raw_spin_lock(&base->lock);
- nextevt = __next_timer_interrupt(base);
- is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
- base->next_expiry = nextevt;
- @@ -1560,7 +1593,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
- base->is_idle = true;
- }
- }
- - spin_unlock(&base->lock);
- + raw_spin_unlock(&base->lock);
-
- return cmp_next_hrtimer_event(basem, expires);
- }
- @@ -1625,13 +1658,13 @@ void update_process_times(int user_tick)
-
- /* Note: this timer irq context must be accounted for as well. */
- account_process_tick(p, user_tick);
- + scheduler_tick();
- run_local_timers();
- rcu_check_callbacks(user_tick);
- -#ifdef CONFIG_IRQ_WORK
- +#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
- if (in_irq())
- irq_work_tick();
- #endif
- - scheduler_tick();
- run_posix_cpu_timers(p);
- }
-
- @@ -1647,7 +1680,7 @@ static inline void __run_timers(struct timer_base *base)
- if (!time_after_eq(jiffies, base->clk))
- return;
-
- - spin_lock_irq(&base->lock);
- + raw_spin_lock_irq(&base->lock);
-
- while (time_after_eq(jiffies, base->clk)) {
-
- @@ -1657,8 +1690,8 @@ static inline void __run_timers(struct timer_base *base)
- while (levels--)
- expire_timers(base, heads + levels);
- }
- - base->running_timer = NULL;
- - spin_unlock_irq(&base->lock);
- + raw_spin_unlock_irq(&base->lock);
- + wakeup_timer_waiters(base);
- }
-
- /*
- @@ -1681,6 +1714,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
- */
- base->must_forward_clk = false;
-
- + irq_work_tick_soft();
- +
- __run_timers(base);
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
- __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
- @@ -1881,16 +1916,16 @@ int timers_dead_cpu(unsigned int cpu)
- * The caller is globally serialized and nobody else
- * takes two locks at once, deadlock is not possible.
- */
- - spin_lock_irq(&new_base->lock);
- - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
- + raw_spin_lock_irq(&new_base->lock);
- + raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
- BUG_ON(old_base->running_timer);
-
- for (i = 0; i < WHEEL_SIZE; i++)
- migrate_timer_list(new_base, old_base->vectors + i);
-
- - spin_unlock(&old_base->lock);
- - spin_unlock_irq(&new_base->lock);
- + raw_spin_unlock(&old_base->lock);
- + raw_spin_unlock_irq(&new_base->lock);
- put_cpu_ptr(&timer_bases);
- }
- return 0;
- @@ -1906,8 +1941,11 @@ static void __init init_timer_cpu(int cpu)
- for (i = 0; i < NR_BASES; i++) {
- base = per_cpu_ptr(&timer_bases[i], cpu);
- base->cpu = cpu;
- - spin_lock_init(&base->lock);
- + raw_spin_lock_init(&base->lock);
- base->clk = jiffies;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + init_swait_queue_head(&base->wait_for_running_timer);
- +#endif
- }
- }
-
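
With the timer base lock turned into a raw spinlock, del_timer_sync() can no longer busy-wait on RT: the hunks above make the expiry path clear running_timer and wake an swait queue, while a deleter that races with a running callback sleeps until that callback has finished. A pthread-based analogue of the handshake (an analogy for illustration, not kernel code):

/*
 * Analogy only (pthreads, not the kernel swait API): the expiry path
 * publishes which timer is running, runs the callback unlocked, then
 * clears running_timer and wakes waiters; a deleter sleeps until its
 * timer is no longer the running one.
 */
#include <pthread.h>
#include <stddef.h>

struct timer_base_sim {
	pthread_mutex_t lock;
	pthread_cond_t wait_for_running_timer;
	const void *running_timer;
};

static void expire_one(struct timer_base_sim *b, const void *timer,
		       void (*fn)(void))
{
	pthread_mutex_lock(&b->lock);
	b->running_timer = timer;
	pthread_mutex_unlock(&b->lock);

	fn();				/* callback runs without the lock */

	pthread_mutex_lock(&b->lock);
	b->running_timer = NULL;
	pthread_mutex_unlock(&b->lock);
	pthread_cond_broadcast(&b->wait_for_running_timer);
}

static void wait_for_running_timer_sim(struct timer_base_sim *b,
				       const void *timer)
{
	pthread_mutex_lock(&b->lock);
	while (b->running_timer == timer)
		pthread_cond_wait(&b->wait_for_running_timer, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

static void cb(void) { }

int main(void)
{
	struct timer_base_sim b = { PTHREAD_MUTEX_INITIALIZER,
				    PTHREAD_COND_INITIALIZER, NULL };

	expire_one(&b, &b, cb);
	wait_for_running_timer_sim(&b, &b);	/* returns immediately */
	return 0;
}
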
- diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
- index 2a96b063d659..812e37237eb8 100644
- --- a/kernel/trace/Kconfig
- +++ b/kernel/trace/Kconfig
- @@ -182,6 +182,24 @@ config IRQSOFF_TRACER
- enabled. This option and the preempt-off timing option can be
- used together or separately.)
-
- +config INTERRUPT_OFF_HIST
- + bool "Interrupts-off Latency Histogram"
- + depends on IRQSOFF_TRACER
- + help
- + This option generates continuously updated histograms (one per cpu)
- + of the duration of time periods with interrupts disabled. The
- + histograms are disabled by default. To enable them, write a non-zero
- + number to
- +
- + /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
- +
- + If PREEMPT_OFF_HIST is also selected, additional histograms (one
- + per cpu) are generated that accumulate the duration of time periods
- + when both interrupts and preemption are disabled. The histogram data
- + will be located in the debug file system at
- +
- + /sys/kernel/debug/tracing/latency_hist/irqsoff
- +
- config PREEMPT_TRACER
- bool "Preemption-off Latency Tracer"
- default n
- @@ -206,6 +224,24 @@ config PREEMPT_TRACER
- enabled. This option and the irqs-off timing option can be
- used together or separately.)
-
- +config PREEMPT_OFF_HIST
- + bool "Preemption-off Latency Histogram"
- + depends on PREEMPT_TRACER
- + help
- + This option generates continuously updated histograms (one per cpu)
- + of the duration of time periods with preemption disabled. The
- + histograms are disabled by default. To enable them, write a non-zero
- + number to
- +
- + /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
- +
- + If INTERRUPT_OFF_HIST is also selected, additional histograms (one
- + per cpu) are generated that accumulate the duration of time periods
- + when both interrupts and preemption are disabled. The histogram data
- + will be located in the debug file system at
- +
- + /sys/kernel/debug/tracing/latency_hist/preemptoff
- +
- config SCHED_TRACER
- bool "Scheduling Latency Tracer"
- select GENERIC_TRACER
- @@ -251,6 +287,74 @@ config HWLAT_TRACER
- file. Every time a latency is greater than tracing_thresh, it will
- be recorded into the ring buffer.
-
- +config WAKEUP_LATENCY_HIST
- + bool "Scheduling Latency Histogram"
- + depends on SCHED_TRACER
- + help
- + This option generates continuously updated histograms (one per cpu)
- + of the scheduling latency of the highest priority task.
- + The histograms are disabled by default. To enable them, write a
- + non-zero number to
- +
- + /sys/kernel/debug/tracing/latency_hist/enable/wakeup
- +
- + Two different algorithms are used, one to determine the latency of
- + processes that exclusively use the highest priority of the system and
- + another one to determine the latency of processes that share the
- + highest system priority with other processes. The former is used to
- + improve hardware and system software, the latter to optimize the
- + priority design of a given system. The histogram data will be
- + located in the debug file system at
- +
- + /sys/kernel/debug/tracing/latency_hist/wakeup
- +
- + and
- +
- + /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
- +
- + If both Scheduling Latency Histogram and Missed Timer Offsets
- + Histogram are selected, additional histogram data will be collected
- + that contain, in addition to the wakeup latency, the timer latency, in
- + case the wakeup was triggered by an expired timer. These histograms
- + are available in the
- +
- + /sys/kernel/debug/tracing/latency_hist/timerandwakeup
- +
- + directory. They reflect the apparent interrupt and scheduling latency
- + and are best suited to determine the worst-case latency of a given
- + system. To enable these histograms, write a non-zero number to
- +
- + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
- +
- +config MISSED_TIMER_OFFSETS_HIST
- + depends on HIGH_RES_TIMERS
- + select GENERIC_TRACER
- + bool "Missed Timer Offsets Histogram"
- + help
- + Generate a histogram of missed timer offsets in microseconds. The
- + histograms are disabled by default. To enable them, write a non-zero
- + number to
- +
- + /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
- +
- + The histogram data will be located in the debug file system at
- +
- + /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
- +
- + If both Scheduling Latency Histogram and Missed Timer Offsets
- + Histogram are selected, additional histogram data will be collected
- + that contain, in addition to the wakeup latency, the timer latency, in
- + case the wakeup was triggered by an expired timer. These histograms
- + are available in the
- +
- + /sys/kernel/debug/tracing/latency_hist/timerandwakeup
- +
- + directory. They reflect the apparent interrupt and scheduling latency
- + and are best suited to determine the worst-case latency of a given
- + system. To enable these histograms, write a non-zero number to
- +
- + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
- +
- config ENABLE_DEFAULT_TRACERS
- bool "Trace process context switches and events"
- depends on !GENERIC_TRACER
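
All four histogram options above share one control scheme: write a non-zero number into a file under latency_hist/enable/, let the system run, then read the per-CPU files (named CPU0, CPU1, ...) in the matching directory. A usage sketch in C, with the paths taken from the help texts; it assumes debugfs is mounted at /sys/kernel/debug and the options are compiled in:

/*
 * Usage sketch (assumes debugfs at /sys/kernel/debug and
 * CONFIG_WAKEUP_LATENCY_HIST enabled; paths from the help texts).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/latency_hist/enable/wakeup";
	const char *hist =
		"/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(enable, O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror(enable);
		return 1;
	}
	close(fd);

	sleep(10);	/* let some samples accumulate */

	fd = open(hist, O_RDONLY);
	if (fd < 0) {
		perror(hist);
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
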
- diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
- index e57980845549..83af000b783c 100644
- --- a/kernel/trace/Makefile
- +++ b/kernel/trace/Makefile
- @@ -38,6 +38,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
- obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
- obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
- +obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
- +obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
- +obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
- +obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
- obj-$(CONFIG_NOP_TRACER) += trace_nop.o
- obj-$(CONFIG_STACK_TRACER) += trace_stack.o
- obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
- diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
- new file mode 100644
- index 000000000000..7f6ee70dea41
- --- /dev/null
- +++ b/kernel/trace/latency_hist.c
- @@ -0,0 +1,1178 @@
- +/*
- + * kernel/trace/latency_hist.c
- + *
- + * Add support for histograms of preemption-off latency and
- + * interrupt-off latency and wakeup latency; it depends on
- + * Real-Time Preemption Support.
- + *
- + * Copyright (C) 2005 MontaVista Software, Inc.
- + * Yi Yang <yyang@ch.mvista.com>
- + *
- + * Converted to work with the new latency tracer.
- + * Copyright (C) 2008 Red Hat, Inc.
- + * Steven Rostedt <srostedt@redhat.com>
- + *
- + */
- +#include <linux/module.h>
- +#include <linux/debugfs.h>
- +#include <linux/seq_file.h>
- +#include <linux/percpu.h>
- +#include <linux/kallsyms.h>
- +#include <linux/uaccess.h>
- +#include <linux/sched.h>
- +#include <linux/sched/rt.h>
- +#include <linux/slab.h>
- +#include <linux/atomic.h>
- +#include <asm/div64.h>
- +
- +#include "trace.h"
- +#include <trace/events/sched.h>
- +
- +#define NSECS_PER_USECS 1000L
- +
- +#define CREATE_TRACE_POINTS
- +#include <trace/events/hist.h>
- +
- +enum {
- + IRQSOFF_LATENCY = 0,
- + PREEMPTOFF_LATENCY,
- + PREEMPTIRQSOFF_LATENCY,
- + WAKEUP_LATENCY,
- + WAKEUP_LATENCY_SHAREDPRIO,
- + MISSED_TIMER_OFFSETS,
- + TIMERANDWAKEUP_LATENCY,
- + MAX_LATENCY_TYPE,
- +};
- +
- +#define MAX_ENTRY_NUM 10240
- +
- +struct hist_data {
- + atomic_t hist_mode; /* 0 log, 1 don't log */
- + long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
- + long min_lat;
- + long max_lat;
- + unsigned long long below_hist_bound_samples;
- + unsigned long long above_hist_bound_samples;
- + long long accumulate_lat;
- + unsigned long long total_samples;
- + unsigned long long hist_array[MAX_ENTRY_NUM];
- +};
- +
- +struct enable_data {
- + int latency_type;
- + int enabled;
- +};
- +
- +static char *latency_hist_dir_root = "latency_hist";
- +
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- +static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
- +static char *irqsoff_hist_dir = "irqsoff";
- +static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
- +static DEFINE_PER_CPU(int, hist_irqsoff_counting);
- +#endif
- +
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- +static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
- +static char *preemptoff_hist_dir = "preemptoff";
- +static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
- +static DEFINE_PER_CPU(int, hist_preemptoff_counting);
- +#endif
- +
- +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
- +static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
- +static char *preemptirqsoff_hist_dir = "preemptirqsoff";
- +static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
- +static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
- +#endif
- +
- +#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
- +static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
- +static struct enable_data preemptirqsoff_enabled_data = {
- + .latency_type = PREEMPTIRQSOFF_LATENCY,
- + .enabled = 0,
- +};
- +#endif
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- +struct maxlatproc_data {
- + char comm[FIELD_SIZEOF(struct task_struct, comm)];
- + char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
- + int pid;
- + int current_pid;
- + int prio;
- + int current_prio;
- + long latency;
- + long timeroffset;
- + cycle_t timestamp;
- +};
- +#endif
- +
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
- +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
- +static char *wakeup_latency_hist_dir = "wakeup";
- +static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
- +static notrace void probe_wakeup_latency_hist_start(void *v,
- + struct task_struct *p);
- +static notrace void probe_wakeup_latency_hist_stop(void *v,
- + bool preempt, struct task_struct *prev, struct task_struct *next);
- +static notrace void probe_sched_migrate_task(void *,
- + struct task_struct *task, int cpu);
- +static struct enable_data wakeup_latency_enabled_data = {
- + .latency_type = WAKEUP_LATENCY,
- + .enabled = 0,
- +};
- +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
- +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
- +static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
- +static DEFINE_PER_CPU(int, wakeup_sharedprio);
- +static unsigned long wakeup_pid;
- +#endif
- +
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- +static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
- +static char *missed_timer_offsets_dir = "missed_timer_offsets";
- +static notrace void probe_hrtimer_interrupt(void *v, int cpu,
- + long long offset, struct task_struct *curr, struct task_struct *task);
- +static struct enable_data missed_timer_offsets_enabled_data = {
- + .latency_type = MISSED_TIMER_OFFSETS,
- + .enabled = 0,
- +};
- +static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
- +static unsigned long missed_timer_offsets_pid;
- +#endif
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- +static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
- +static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
- +static struct enable_data timerandwakeup_enabled_data = {
- + .latency_type = TIMERANDWAKEUP_LATENCY,
- + .enabled = 0,
- +};
- +static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
- +#endif
- +
- +void notrace latency_hist(int latency_type, int cpu, long latency,
- + long timeroffset, cycle_t stop,
- + struct task_struct *p)
- +{
- + struct hist_data *my_hist;
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + struct maxlatproc_data *mp = NULL;
- +#endif
- +
- + if (!cpu_possible(cpu) || latency_type < 0 ||
- + latency_type >= MAX_LATENCY_TYPE)
- + return;
- +
- + switch (latency_type) {
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- + case IRQSOFF_LATENCY:
- + my_hist = &per_cpu(irqsoff_hist, cpu);
- + break;
- +#endif
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- + case PREEMPTOFF_LATENCY:
- + my_hist = &per_cpu(preemptoff_hist, cpu);
- + break;
- +#endif
- +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
- + case PREEMPTIRQSOFF_LATENCY:
- + my_hist = &per_cpu(preemptirqsoff_hist, cpu);
- + break;
- +#endif
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + case WAKEUP_LATENCY:
- + my_hist = &per_cpu(wakeup_latency_hist, cpu);
- + mp = &per_cpu(wakeup_maxlatproc, cpu);
- + break;
- + case WAKEUP_LATENCY_SHAREDPRIO:
- + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
- + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
- + break;
- +#endif
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + case MISSED_TIMER_OFFSETS:
- + my_hist = &per_cpu(missed_timer_offsets, cpu);
- + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
- + break;
- +#endif
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + case TIMERANDWAKEUP_LATENCY:
- + my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
- + mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
- + break;
- +#endif
- +
- + default:
- + return;
- + }
- +
- + latency += my_hist->offset;
- +
- + if (atomic_read(&my_hist->hist_mode) == 0)
- + return;
- +
- + if (latency < 0 || latency >= MAX_ENTRY_NUM) {
- + if (latency < 0)
- + my_hist->below_hist_bound_samples++;
- + else
- + my_hist->above_hist_bound_samples++;
- + } else
- + my_hist->hist_array[latency]++;
- +
- + if (unlikely(latency > my_hist->max_lat ||
- + my_hist->min_lat == LONG_MAX)) {
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + if (latency_type == WAKEUP_LATENCY ||
- + latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
- + latency_type == MISSED_TIMER_OFFSETS ||
- + latency_type == TIMERANDWAKEUP_LATENCY) {
- + strncpy(mp->comm, p->comm, sizeof(mp->comm));
- + strncpy(mp->current_comm, current->comm,
- + sizeof(mp->current_comm));
- + mp->pid = task_pid_nr(p);
- + mp->current_pid = task_pid_nr(current);
- + mp->prio = p->prio;
- + mp->current_prio = current->prio;
- + mp->latency = latency;
- + mp->timeroffset = timeroffset;
- + mp->timestamp = stop;
- + }
- +#endif
- + my_hist->max_lat = latency;
- + }
- + if (unlikely(latency < my_hist->min_lat))
- + my_hist->min_lat = latency;
- + my_hist->total_samples++;
- + my_hist->accumulate_lat += latency;
- +}
- +
- +static void *l_start(struct seq_file *m, loff_t *pos)
- +{
- + loff_t *index_ptr = NULL;
- + loff_t index = *pos;
- + struct hist_data *my_hist = m->private;
- +
- + if (index == 0) {
- + char minstr[32], avgstr[32], maxstr[32];
- +
- + atomic_dec(&my_hist->hist_mode);
- +
- + if (likely(my_hist->total_samples)) {
- + long avg = (long) div64_s64(my_hist->accumulate_lat,
- + my_hist->total_samples);
- + snprintf(minstr, sizeof(minstr), "%ld",
- + my_hist->min_lat - my_hist->offset);
- + snprintf(avgstr, sizeof(avgstr), "%ld",
- + avg - my_hist->offset);
- + snprintf(maxstr, sizeof(maxstr), "%ld",
- + my_hist->max_lat - my_hist->offset);
- + } else {
- + strcpy(minstr, "<undef>");
- + strcpy(avgstr, minstr);
- + strcpy(maxstr, minstr);
- + }
- +
- + seq_printf(m, "#Minimum latency: %s microseconds\n"
- + "#Average latency: %s microseconds\n"
- + "#Maximum latency: %s microseconds\n"
- + "#Total samples: %llu\n"
- + "#There are %llu samples lower than %ld"
- + " microseconds.\n"
- + "#There are %llu samples greater or equal"
- + " than %ld microseconds.\n"
- + "#usecs\t%16s\n",
- + minstr, avgstr, maxstr,
- + my_hist->total_samples,
- + my_hist->below_hist_bound_samples,
- + -my_hist->offset,
- + my_hist->above_hist_bound_samples,
- + MAX_ENTRY_NUM - my_hist->offset,
- + "samples");
- + }
- + if (index < MAX_ENTRY_NUM) {
- + index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
- + if (index_ptr)
- + *index_ptr = index;
- + }
- +
- + return index_ptr;
- +}
- +
- +static void *l_next(struct seq_file *m, void *p, loff_t *pos)
- +{
- + loff_t *index_ptr = p;
- + struct hist_data *my_hist = m->private;
- +
- + if (++*pos >= MAX_ENTRY_NUM) {
- + atomic_inc(&my_hist->hist_mode);
- + return NULL;
- + }
- + *index_ptr = *pos;
- + return index_ptr;
- +}
- +
- +static void l_stop(struct seq_file *m, void *p)
- +{
- + kfree(p);
- +}
- +
- +static int l_show(struct seq_file *m, void *p)
- +{
- + int index = *(loff_t *) p;
- + struct hist_data *my_hist = m->private;
- +
- + seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
- + my_hist->hist_array[index]);
- + return 0;
- +}
- +
- +static const struct seq_operations latency_hist_seq_op = {
- + .start = l_start,
- + .next = l_next,
- + .stop = l_stop,
- + .show = l_show
- +};
- +
- +static int latency_hist_open(struct inode *inode, struct file *file)
- +{
- + int ret;
- +
- + ret = seq_open(file, &latency_hist_seq_op);
- + if (!ret) {
- + struct seq_file *seq = file->private_data;
- + seq->private = inode->i_private;
- + }
- + return ret;
- +}
- +
- +static const struct file_operations latency_hist_fops = {
- + .open = latency_hist_open,
- + .read = seq_read,
- + .llseek = seq_lseek,
- + .release = seq_release,
- +};
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- +static void clear_maxlatprocdata(struct maxlatproc_data *mp)
- +{
- + mp->comm[0] = mp->current_comm[0] = '\0';
- + mp->prio = mp->current_prio = mp->pid = mp->current_pid =
- + mp->latency = mp->timeroffset = -1;
- + mp->timestamp = 0;
- +}
- +#endif
- +
- +static void hist_reset(struct hist_data *hist)
- +{
- + atomic_dec(&hist->hist_mode);
- +
- + memset(hist->hist_array, 0, sizeof(hist->hist_array));
- + hist->below_hist_bound_samples = 0ULL;
- + hist->above_hist_bound_samples = 0ULL;
- + hist->min_lat = LONG_MAX;
- + hist->max_lat = LONG_MIN;
- + hist->total_samples = 0ULL;
- + hist->accumulate_lat = 0LL;
- +
- + atomic_inc(&hist->hist_mode);
- +}
- +
- +static ssize_t
- +latency_hist_reset(struct file *file, const char __user *a,
- + size_t size, loff_t *off)
- +{
- + int cpu;
- + struct hist_data *hist = NULL;
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + struct maxlatproc_data *mp = NULL;
- +#endif
- + off_t latency_type = (off_t) file->private_data;
- +
- + for_each_online_cpu(cpu) {
- +
- + switch (latency_type) {
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- + case PREEMPTOFF_LATENCY:
- + hist = &per_cpu(preemptoff_hist, cpu);
- + break;
- +#endif
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- + case IRQSOFF_LATENCY:
- + hist = &per_cpu(irqsoff_hist, cpu);
- + break;
- +#endif
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- + case PREEMPTIRQSOFF_LATENCY:
- + hist = &per_cpu(preemptirqsoff_hist, cpu);
- + break;
- +#endif
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + case WAKEUP_LATENCY:
- + hist = &per_cpu(wakeup_latency_hist, cpu);
- + mp = &per_cpu(wakeup_maxlatproc, cpu);
- + break;
- + case WAKEUP_LATENCY_SHAREDPRIO:
- + hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
- + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
- + break;
- +#endif
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + case MISSED_TIMER_OFFSETS:
- + hist = &per_cpu(missed_timer_offsets, cpu);
- + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
- + break;
- +#endif
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + case TIMERANDWAKEUP_LATENCY:
- + hist = &per_cpu(timerandwakeup_latency_hist, cpu);
- + mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
- + break;
- +#endif
- + }
- +
- + hist_reset(hist);
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + if (latency_type == WAKEUP_LATENCY ||
- + latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
- + latency_type == MISSED_TIMER_OFFSETS ||
- + latency_type == TIMERANDWAKEUP_LATENCY)
- + clear_maxlatprocdata(mp);
- +#endif
- + }
- +
- + return size;
- +}
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- +static ssize_t
- +show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
- +{
- + char buf[64];
- + int r;
- + unsigned long *this_pid = file->private_data;
- +
- + r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
- + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
- +}
- +
- +static ssize_t do_pid(struct file *file, const char __user *ubuf,
- + size_t cnt, loff_t *ppos)
- +{
- + char buf[64];
- + unsigned long pid;
- + unsigned long *this_pid = file->private_data;
- +
- + if (cnt >= sizeof(buf))
- + return -EINVAL;
- +
- + if (copy_from_user(&buf, ubuf, cnt))
- + return -EFAULT;
- +
- + buf[cnt] = '\0';
- +
- + if (kstrtoul(buf, 10, &pid))
- + return -EINVAL;
- +
- + *this_pid = pid;
- +
- + return cnt;
- +}
- +#endif
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- +static ssize_t
- +show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
- +{
- + int r;
- + struct maxlatproc_data *mp = file->private_data;
- + int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
- + unsigned long long t;
- + unsigned long usecs, secs;
- + char *buf;
- +
- + if (mp->pid == -1 || mp->current_pid == -1) {
- + buf = "(none)\n";
- + return simple_read_from_buffer(ubuf, cnt, ppos, buf,
- + strlen(buf));
- + }
- +
- + buf = kmalloc(strmaxlen, GFP_KERNEL);
- + if (buf == NULL)
- + return -ENOMEM;
- +
- + t = ns2usecs(mp->timestamp);
- + usecs = do_div(t, USEC_PER_SEC);
- + secs = (unsigned long) t;
- + r = snprintf(buf, strmaxlen,
- + "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
- + MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
- + mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
- + secs, usecs);
- + r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
- + kfree(buf);
- + return r;
- +}
- +#endif
- +
- +static ssize_t
- +show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
- +{
- + char buf[64];
- + struct enable_data *ed = file->private_data;
- + int r;
- +
- + r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
- + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
- +}
- +
- +static ssize_t
- +do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
- +{
- + char buf[64];
- + long enable;
- + struct enable_data *ed = file->private_data;
- +
- + if (cnt >= sizeof(buf))
- + return -EINVAL;
- +
- + if (copy_from_user(&buf, ubuf, cnt))
- + return -EFAULT;
- +
- + buf[cnt] = 0;
- +
- + if (kstrtol(buf, 10, &enable))
- + return -EINVAL;
- +
- + if ((enable && ed->enabled) || (!enable && !ed->enabled))
- + return cnt;
- +
- + if (enable) {
- + int ret;
- +
- + switch (ed->latency_type) {
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- + case PREEMPTIRQSOFF_LATENCY:
- + ret = register_trace_preemptirqsoff_hist(
- + probe_preemptirqsoff_hist, NULL);
- + if (ret) {
- + pr_info("wakeup trace: Couldn't assign "
- + "probe_preemptirqsoff_hist "
- + "to trace_preemptirqsoff_hist\n");
- + return ret;
- + }
- + break;
- +#endif
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + case WAKEUP_LATENCY:
- + ret = register_trace_sched_wakeup(
- + probe_wakeup_latency_hist_start, NULL);
- + if (ret) {
- + pr_info("wakeup trace: Couldn't assign "
- + "probe_wakeup_latency_hist_start "
- + "to trace_sched_wakeup\n");
- + return ret;
- + }
- + ret = register_trace_sched_wakeup_new(
- + probe_wakeup_latency_hist_start, NULL);
- + if (ret) {
- + pr_info("wakeup trace: Couldn't assign "
- + "probe_wakeup_latency_hist_start "
- + "to trace_sched_wakeup_new\n");
- + unregister_trace_sched_wakeup(
- + probe_wakeup_latency_hist_start, NULL);
- + return ret;
- + }
- + ret = register_trace_sched_switch(
- + probe_wakeup_latency_hist_stop, NULL);
- + if (ret) {
- + pr_info("wakeup trace: Couldn't assign "
- + "probe_wakeup_latency_hist_stop "
- + "to trace_sched_switch\n");
- + unregister_trace_sched_wakeup(
- + probe_wakeup_latency_hist_start, NULL);
- + unregister_trace_sched_wakeup_new(
- + probe_wakeup_latency_hist_start, NULL);
- + return ret;
- + }
- + ret = register_trace_sched_migrate_task(
- + probe_sched_migrate_task, NULL);
- + if (ret) {
- + pr_info("wakeup trace: Couldn't assign "
- + "probe_sched_migrate_task "
- + "to trace_sched_migrate_task\n");
- + unregister_trace_sched_wakeup(
- + probe_wakeup_latency_hist_start, NULL);
- + unregister_trace_sched_wakeup_new(
- + probe_wakeup_latency_hist_start, NULL);
- + unregister_trace_sched_switch(
- + probe_wakeup_latency_hist_stop, NULL);
- + return ret;
- + }
- + break;
- +#endif
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + case MISSED_TIMER_OFFSETS:
- + ret = register_trace_hrtimer_interrupt(
- + probe_hrtimer_interrupt, NULL);
- + if (ret) {
- + pr_info("wakeup trace: Couldn't assign "
- + "probe_hrtimer_interrupt "
- + "to trace_hrtimer_interrupt\n");
- + return ret;
- + }
- + break;
- +#endif
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + case TIMERANDWAKEUP_LATENCY:
- + if (!wakeup_latency_enabled_data.enabled ||
- + !missed_timer_offsets_enabled_data.enabled)
- + return -EINVAL;
- + break;
- +#endif
- + default:
- + break;
- + }
- + } else {
- + switch (ed->latency_type) {
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- + case PREEMPTIRQSOFF_LATENCY:
- + {
- + int cpu;
- +
- + unregister_trace_preemptirqsoff_hist(
- + probe_preemptirqsoff_hist, NULL);
- + for_each_online_cpu(cpu) {
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- + per_cpu(hist_irqsoff_counting,
- + cpu) = 0;
- +#endif
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- + per_cpu(hist_preemptoff_counting,
- + cpu) = 0;
- +#endif
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- + per_cpu(hist_preemptirqsoff_counting,
- + cpu) = 0;
- +#endif
- + }
- + }
- + break;
- +#endif
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + case WAKEUP_LATENCY:
- + {
- + int cpu;
- +
- + unregister_trace_sched_wakeup(
- + probe_wakeup_latency_hist_start, NULL);
- + unregister_trace_sched_wakeup_new(
- + probe_wakeup_latency_hist_start, NULL);
- + unregister_trace_sched_switch(
- + probe_wakeup_latency_hist_stop, NULL);
- + unregister_trace_sched_migrate_task(
- + probe_sched_migrate_task, NULL);
- +
- + for_each_online_cpu(cpu) {
- + per_cpu(wakeup_task, cpu) = NULL;
- + per_cpu(wakeup_sharedprio, cpu) = 0;
- + }
- + }
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + timerandwakeup_enabled_data.enabled = 0;
- +#endif
- + break;
- +#endif
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + case MISSED_TIMER_OFFSETS:
- + unregister_trace_hrtimer_interrupt(
- + probe_hrtimer_interrupt, NULL);
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + timerandwakeup_enabled_data.enabled = 0;
- +#endif
- + break;
- +#endif
- + default:
- + break;
- + }
- + }
- + ed->enabled = enable;
- + return cnt;
- +}
- +
- +static const struct file_operations latency_hist_reset_fops = {
- + .open = tracing_open_generic,
- + .write = latency_hist_reset,
- +};
- +
- +static const struct file_operations enable_fops = {
- + .open = tracing_open_generic,
- + .read = show_enable,
- + .write = do_enable,
- +};
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- +static const struct file_operations pid_fops = {
- + .open = tracing_open_generic,
- + .read = show_pid,
- + .write = do_pid,
- +};
- +
- +static const struct file_operations maxlatproc_fops = {
- + .open = tracing_open_generic,
- + .read = show_maxlatproc,
- +};
- +#endif
- +
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- +static notrace void probe_preemptirqsoff_hist(void *v, int reason,
- + int starthist)
- +{
- + int cpu = raw_smp_processor_id();
- + int time_set = 0;
- +
- + if (starthist) {
- + cycle_t uninitialized_var(start);
- +
- + if (!preempt_count() && !irqs_disabled())
- + return;
- +
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- + if ((reason == IRQS_OFF || reason == TRACE_START) &&
- + !per_cpu(hist_irqsoff_counting, cpu)) {
- + per_cpu(hist_irqsoff_counting, cpu) = 1;
- + start = ftrace_now(cpu);
- + time_set++;
- + per_cpu(hist_irqsoff_start, cpu) = start;
- + }
- +#endif
- +
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- + if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
- + !per_cpu(hist_preemptoff_counting, cpu)) {
- + per_cpu(hist_preemptoff_counting, cpu) = 1;
- + if (!(time_set++))
- + start = ftrace_now(cpu);
- + per_cpu(hist_preemptoff_start, cpu) = start;
- + }
- +#endif
- +
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- + if (per_cpu(hist_irqsoff_counting, cpu) &&
- + per_cpu(hist_preemptoff_counting, cpu) &&
- + !per_cpu(hist_preemptirqsoff_counting, cpu)) {
- + per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
- + if (!time_set)
- + start = ftrace_now(cpu);
- + per_cpu(hist_preemptirqsoff_start, cpu) = start;
- + }
- +#endif
- + } else {
- + cycle_t uninitialized_var(stop);
- +
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- + if ((reason == IRQS_ON || reason == TRACE_STOP) &&
- + per_cpu(hist_irqsoff_counting, cpu)) {
- + cycle_t start = per_cpu(hist_irqsoff_start, cpu);
- +
- + stop = ftrace_now(cpu);
- + time_set++;
- + if (start) {
- + long latency = ((long) (stop - start)) /
- + NSECS_PER_USECS;
- +
- + latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
- + stop, NULL);
- + }
- + per_cpu(hist_irqsoff_counting, cpu) = 0;
- + }
- +#endif
- +
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- + if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
- + per_cpu(hist_preemptoff_counting, cpu)) {
- + cycle_t start = per_cpu(hist_preemptoff_start, cpu);
- +
- + if (!(time_set++))
- + stop = ftrace_now(cpu);
- + if (start) {
- + long latency = ((long) (stop - start)) /
- + NSECS_PER_USECS;
- +
- + latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
- + 0, stop, NULL);
- + }
- + per_cpu(hist_preemptoff_counting, cpu) = 0;
- + }
- +#endif
- +
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- + if ((!per_cpu(hist_irqsoff_counting, cpu) ||
- + !per_cpu(hist_preemptoff_counting, cpu)) &&
- + per_cpu(hist_preemptirqsoff_counting, cpu)) {
- + cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
- +
- + if (!time_set)
- + stop = ftrace_now(cpu);
- + if (start) {
- + long latency = ((long) (stop - start)) /
- + NSECS_PER_USECS;
- +
- + latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
- + latency, 0, stop, NULL);
- + }
- + per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
- + }
- +#endif
- + }
- +}
- +#endif
- +
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- +static DEFINE_RAW_SPINLOCK(wakeup_lock);
- +static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
- + int cpu)
- +{
- + int old_cpu = task_cpu(task);
- +
- + if (cpu != old_cpu) {
- + unsigned long flags;
- + struct task_struct *cpu_wakeup_task;
- +
- + raw_spin_lock_irqsave(&wakeup_lock, flags);
- +
- + cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
- + if (task == cpu_wakeup_task) {
- + put_task_struct(cpu_wakeup_task);
- + per_cpu(wakeup_task, old_cpu) = NULL;
- + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
- + get_task_struct(cpu_wakeup_task);
- + }
- +
- + raw_spin_unlock_irqrestore(&wakeup_lock, flags);
- + }
- +}
- +
- +static notrace void probe_wakeup_latency_hist_start(void *v,
- + struct task_struct *p)
- +{
- + unsigned long flags;
- + struct task_struct *curr = current;
- + int cpu = task_cpu(p);
- + struct task_struct *cpu_wakeup_task;
- +
- + raw_spin_lock_irqsave(&wakeup_lock, flags);
- +
- + cpu_wakeup_task = per_cpu(wakeup_task, cpu);
- +
- + if (wakeup_pid) {
- + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
- + p->prio == curr->prio)
- + per_cpu(wakeup_sharedprio, cpu) = 1;
- + if (likely(wakeup_pid != task_pid_nr(p)))
- + goto out;
- + } else {
- + if (likely(!rt_task(p)) ||
- + (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
- + p->prio > curr->prio)
- + goto out;
- + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
- + p->prio == curr->prio)
- + per_cpu(wakeup_sharedprio, cpu) = 1;
- + }
- +
- + if (cpu_wakeup_task)
- + put_task_struct(cpu_wakeup_task);
- + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
- + get_task_struct(cpu_wakeup_task);
- + cpu_wakeup_task->preempt_timestamp_hist =
- + ftrace_now(raw_smp_processor_id());
- +out:
- + raw_spin_unlock_irqrestore(&wakeup_lock, flags);
- +}
- +
- +static notrace void probe_wakeup_latency_hist_stop(void *v,
- + bool preempt, struct task_struct *prev, struct task_struct *next)
- +{
- + unsigned long flags;
- + int cpu = task_cpu(next);
- + long latency;
- + cycle_t stop;
- + struct task_struct *cpu_wakeup_task;
- +
- + raw_spin_lock_irqsave(&wakeup_lock, flags);
- +
- + cpu_wakeup_task = per_cpu(wakeup_task, cpu);
- +
- + if (cpu_wakeup_task == NULL)
- + goto out;
- +
- + /* Already running? */
- + if (unlikely(current == cpu_wakeup_task))
- + goto out_reset;
- +
- + if (next != cpu_wakeup_task) {
- + if (next->prio < cpu_wakeup_task->prio)
- + goto out_reset;
- +
- + if (next->prio == cpu_wakeup_task->prio)
- + per_cpu(wakeup_sharedprio, cpu) = 1;
- +
- + goto out;
- + }
- +
- + if (current->prio == cpu_wakeup_task->prio)
- + per_cpu(wakeup_sharedprio, cpu) = 1;
- +
- + /*
- + * The task we are waiting for is about to be switched to.
- + * Calculate latency and store it in histogram.
- + */
- + stop = ftrace_now(raw_smp_processor_id());
- +
- + latency = ((long) (stop - next->preempt_timestamp_hist)) /
- + NSECS_PER_USECS;
- +
- + if (per_cpu(wakeup_sharedprio, cpu)) {
- + latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
- + next);
- + per_cpu(wakeup_sharedprio, cpu) = 0;
- + } else {
- + latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + if (timerandwakeup_enabled_data.enabled) {
- + latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
- + next->timer_offset + latency, next->timer_offset,
- + stop, next);
- + }
- +#endif
- + }
- +
- +out_reset:
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + next->timer_offset = 0;
- +#endif
- + put_task_struct(cpu_wakeup_task);
- + per_cpu(wakeup_task, cpu) = NULL;
- +out:
- + raw_spin_unlock_irqrestore(&wakeup_lock, flags);
- +}
- +#endif
- +
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- +static notrace void probe_hrtimer_interrupt(void *v, int cpu,
- + long long latency_ns, struct task_struct *curr,
- + struct task_struct *task)
- +{
- + if (latency_ns <= 0 && task != NULL && rt_task(task) &&
- + (task->prio < curr->prio ||
- + (task->prio == curr->prio &&
- + !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
- + long latency;
- + cycle_t now;
- +
- + if (missed_timer_offsets_pid) {
- + if (likely(missed_timer_offsets_pid !=
- + task_pid_nr(task)))
- + return;
- + }
- +
- + now = ftrace_now(cpu);
- + latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
- + latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
- + task);
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + task->timer_offset = latency;
- +#endif
- + }
- +}
- +#endif
- +
- +static __init int latency_hist_init(void)
- +{
- + struct dentry *latency_hist_root = NULL;
- + struct dentry *dentry;
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + struct dentry *dentry_sharedprio;
- +#endif
- + struct dentry *entry;
- + struct dentry *enable_root;
- + int i = 0;
- + struct hist_data *my_hist;
- + char name[64];
- + char *cpufmt = "CPU%d";
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + char *cpufmt_maxlatproc = "max_latency-CPU%d";
- + struct maxlatproc_data *mp = NULL;
- +#endif
- +
- + dentry = tracing_init_dentry();
- + latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
- + enable_root = debugfs_create_dir("enable", latency_hist_root);
- +
- +#ifdef CONFIG_INTERRUPT_OFF_HIST
- + dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
- + for_each_possible_cpu(i) {
- + sprintf(name, cpufmt, i);
- + entry = debugfs_create_file(name, 0444, dentry,
- + &per_cpu(irqsoff_hist, i), &latency_hist_fops);
- + my_hist = &per_cpu(irqsoff_hist, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- + }
- + entry = debugfs_create_file("reset", 0644, dentry,
- + (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
- +#endif
- +
- +#ifdef CONFIG_PREEMPT_OFF_HIST
- + dentry = debugfs_create_dir(preemptoff_hist_dir,
- + latency_hist_root);
- + for_each_possible_cpu(i) {
- + sprintf(name, cpufmt, i);
- + entry = debugfs_create_file(name, 0444, dentry,
- + &per_cpu(preemptoff_hist, i), &latency_hist_fops);
- + my_hist = &per_cpu(preemptoff_hist, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- + }
- + entry = debugfs_create_file("reset", 0644, dentry,
- + (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
- +#endif
- +
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
- + dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
- + latency_hist_root);
- + for_each_possible_cpu(i) {
- + sprintf(name, cpufmt, i);
- + entry = debugfs_create_file(name, 0444, dentry,
- + &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
- + my_hist = &per_cpu(preemptirqsoff_hist, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- + }
- + entry = debugfs_create_file("reset", 0644, dentry,
- + (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
- +#endif
- +
- +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
- + entry = debugfs_create_file("preemptirqsoff", 0644,
- + enable_root, (void *)&preemptirqsoff_enabled_data,
- + &enable_fops);
- +#endif
- +
- +#ifdef CONFIG_WAKEUP_LATENCY_HIST
- + dentry = debugfs_create_dir(wakeup_latency_hist_dir,
- + latency_hist_root);
- + dentry_sharedprio = debugfs_create_dir(
- + wakeup_latency_hist_dir_sharedprio, dentry);
- + for_each_possible_cpu(i) {
- + sprintf(name, cpufmt, i);
- +
- + entry = debugfs_create_file(name, 0444, dentry,
- + &per_cpu(wakeup_latency_hist, i),
- + &latency_hist_fops);
- + my_hist = &per_cpu(wakeup_latency_hist, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- +
- + entry = debugfs_create_file(name, 0444, dentry_sharedprio,
- + &per_cpu(wakeup_latency_hist_sharedprio, i),
- + &latency_hist_fops);
- + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- +
- + sprintf(name, cpufmt_maxlatproc, i);
- +
- + mp = &per_cpu(wakeup_maxlatproc, i);
- + entry = debugfs_create_file(name, 0444, dentry, mp,
- + &maxlatproc_fops);
- + clear_maxlatprocdata(mp);
- +
- + mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
- + entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
- + &maxlatproc_fops);
- + clear_maxlatprocdata(mp);
- + }
- + entry = debugfs_create_file("pid", 0644, dentry,
- + (void *)&wakeup_pid, &pid_fops);
- + entry = debugfs_create_file("reset", 0644, dentry,
- + (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
- + entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
- + (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
- + entry = debugfs_create_file("wakeup", 0644,
- + enable_root, (void *)&wakeup_latency_enabled_data,
- + &enable_fops);
- +#endif
- +
- +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- + dentry = debugfs_create_dir(missed_timer_offsets_dir,
- + latency_hist_root);
- + for_each_possible_cpu(i) {
- + sprintf(name, cpufmt, i);
- + entry = debugfs_create_file(name, 0444, dentry,
- + &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
- + my_hist = &per_cpu(missed_timer_offsets, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- +
- + sprintf(name, cpufmt_maxlatproc, i);
- + mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
- + entry = debugfs_create_file(name, 0444, dentry, mp,
- + &maxlatproc_fops);
- + clear_maxlatprocdata(mp);
- + }
- + entry = debugfs_create_file("pid", 0644, dentry,
- + (void *)&missed_timer_offsets_pid, &pid_fops);
- + entry = debugfs_create_file("reset", 0644, dentry,
- + (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
- + entry = debugfs_create_file("missed_timer_offsets", 0644,
- + enable_root, (void *)&missed_timer_offsets_enabled_data,
- + &enable_fops);
- +#endif
- +
- +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
- + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
- + dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
- + latency_hist_root);
- + for_each_possible_cpu(i) {
- + sprintf(name, cpufmt, i);
- + entry = debugfs_create_file(name, 0444, dentry,
- + &per_cpu(timerandwakeup_latency_hist, i),
- + &latency_hist_fops);
- + my_hist = &per_cpu(timerandwakeup_latency_hist, i);
- + atomic_set(&my_hist->hist_mode, 1);
- + my_hist->min_lat = LONG_MAX;
- +
- + sprintf(name, cpufmt_maxlatproc, i);
- + mp = &per_cpu(timerandwakeup_maxlatproc, i);
- + entry = debugfs_create_file(name, 0444, dentry, mp,
- + &maxlatproc_fops);
- + clear_maxlatprocdata(mp);
- + }
- + entry = debugfs_create_file("reset", 0644, dentry,
- + (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
- + entry = debugfs_create_file("timerandwakeup", 0644,
- + enable_root, (void *)&timerandwakeup_enabled_data,
- + &enable_fops);
- +#endif
- + return 0;
- +}
- +
- +device_initcall(latency_hist_init);
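
The seq_file callbacks above define the file format: l_start() emits '#'-prefixed summary lines (minimum, average, maximum, total and out-of-range sample counts), and l_show() prints one "usecs<tab>samples" row per bucket. A sketch of a consumer for that format (a hypothetical reader; the path is only an example):

/*
 * Hypothetical consumer of the histogram format produced by
 * l_start()/l_show() above: skip '#' header lines, then parse
 * "<usecs>\t<samples>" rows.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0";
	char line[256];
	long usecs;
	unsigned long long samples, total = 0;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#')
			continue;	/* summary header lines */
		if (sscanf(line, "%ld %llu", &usecs, &samples) == 2 &&
		    samples) {
			printf("%6ld us: %llu\n", usecs, samples);
			total += samples;
		}
	}
	fclose(f);
	printf("total: %llu samples\n", total);
	return 0;
}
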
- diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
- index 15b02645ce8b..00d9ebcf42e2 100644
- --- a/kernel/trace/trace.c
- +++ b/kernel/trace/trace.c
- @@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
- struct task_struct *tsk = current;
-
- entry->preempt_count = pc & 0xff;
- + entry->preempt_lazy_count = preempt_lazy_count();
- entry->pid = (tsk) ? tsk->pid : 0;
- entry->flags =
- #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
- @@ -1907,8 +1908,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
- ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
- - (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
- + (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
- + (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
- (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
- +
- + entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
- }
- EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-
- @@ -2898,14 +2902,17 @@ get_total_entries(struct trace_buffer *buf,
-
- static void print_lat_help_header(struct seq_file *m)
- {
- - seq_puts(m, "# _------=> CPU# \n"
- - "# / _-----=> irqs-off \n"
- - "# | / _----=> need-resched \n"
- - "# || / _---=> hardirq/softirq \n"
- - "# ||| / _--=> preempt-depth \n"
- - "# |||| / delay \n"
- - "# cmd pid ||||| time | caller \n"
- - "# \\ / ||||| \\ | / \n");
- + seq_puts(m, "# _--------=> CPU# \n"
- + "# / _-------=> irqs-off \n"
- + "# | / _------=> need-resched \n"
- + "# || / _-----=> need-resched_lazy \n"
- + "# ||| / _----=> hardirq/softirq \n"
- + "# |||| / _---=> preempt-depth \n"
- + "# ||||| / _--=> preempt-lazy-depth\n"
- + "# |||||| / _-=> migrate-disable \n"
- + "# ||||||| / delay \n"
- + "# cmd pid |||||||| time | caller \n"
- + "# \\ / |||||||| \\ | / \n");
- }
-
- static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
- @@ -2931,11 +2938,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
- print_event_info(buf, m);
- seq_puts(m, "# _-----=> irqs-off\n"
- "# / _----=> need-resched\n"
- - "# | / _---=> hardirq/softirq\n"
- - "# || / _--=> preempt-depth\n"
- - "# ||| / delay\n"
- - "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
- - "# | | | |||| | |\n");
- + "# |/ _-----=> need-resched_lazy\n"
- + "# || / _---=> hardirq/softirq\n"
- + "# ||| / _--=> preempt-depth\n"
- + "# |||| / _-=> preempt-lazy-depth\n"
- + "# ||||| / _-=> migrate-disable \n"
- + "# |||||| / delay\n"
- + "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n"
- + "# | | | ||||||| | |\n");
- }
-
- void
- diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
- index b0d8576c27ae..702b9376b278 100644
- --- a/kernel/trace/trace.h
- +++ b/kernel/trace/trace.h
- @@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head {
- * NEED_RESCHED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
- + * NEED_RESCHED_LAZY - lazy reschedule is requested
- */
- enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
- @@ -133,6 +134,7 @@ enum trace_flag_type {
- TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
- TRACE_FLAG_NMI = 0x40,
- + TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
- };
-
- #define TRACE_BUF_SIZE 1024
- diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
- index 03c0a48c3ac4..0b85d516b491 100644
- --- a/kernel/trace/trace_events.c
- +++ b/kernel/trace/trace_events.c
- @@ -187,6 +187,8 @@ static int trace_define_common_fields(void)
- __common_field(unsigned char, flags);
- __common_field(unsigned char, preempt_count);
- __common_field(int, pid);
- + __common_field(unsigned short, migrate_disable);
- + __common_field(unsigned short, padding);
-
- return ret;
- }
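
Together with the entry->preempt_lazy_count and entry->migrate_disable stores in tracing_generic_entry_update() above, these registrations export the migrate-disable counter in every event record; the unsigned short padding keeps the common header size a multiple of four bytes so the per-event fields that follow remain aligned. The resulting common header is roughly the following (a sketch inferred from the registrations and stores in this series; exact field order in the tree may differ):

/* sketch of struct trace_entry after this series; not copied verbatim */
struct trace_entry {
        unsigned short  type;
        unsigned char   flags;
        unsigned char   preempt_count;
        int             pid;
        unsigned short  migrate_disable;
        unsigned short  padding;
        unsigned char   preempt_lazy_count; /* written in tracing_generic_entry_update() */
};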
- diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
- index 03cdff84d026..940bd10b4406 100644
- --- a/kernel/trace/trace_irqsoff.c
- +++ b/kernel/trace/trace_irqsoff.c
- @@ -13,6 +13,7 @@
- #include <linux/uaccess.h>
- #include <linux/module.h>
- #include <linux/ftrace.h>
- +#include <trace/events/hist.h>
-
- #include "trace.h"
-
- @@ -424,11 +425,13 @@ void start_critical_timings(void)
- {
- if (preempt_trace() || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- + trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
- }
- EXPORT_SYMBOL_GPL(start_critical_timings);
-
- void stop_critical_timings(void)
- {
- + trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
- if (preempt_trace() || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
- @@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
- #ifdef CONFIG_PROVE_LOCKING
- void time_hardirqs_on(unsigned long a0, unsigned long a1)
- {
- + trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(a0, a1);
- }
- @@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(a0, a1);
- + trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
- }
-
- #else /* !CONFIG_PROVE_LOCKING */
- @@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct task_struct *curr)
- */
- void trace_hardirqs_on(void)
- {
- + trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
- @@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- + trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off);
-
- __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
- {
- + trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, caller_addr);
- }
- @@ -494,6 +502,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, caller_addr);
- + trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off_caller);
-
- @@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
- #ifdef CONFIG_PREEMPT_TRACER
- void trace_preempt_on(unsigned long a0, unsigned long a1)
- {
- + trace_preemptirqsoff_hist(PREEMPT_ON, 0);
- if (preempt_trace() && !irq_trace())
- stop_critical_timing(a0, a1);
- }
-
- void trace_preempt_off(unsigned long a0, unsigned long a1)
- {
- + trace_preemptirqsoff_hist(PREEMPT_ON, 1);
- if (preempt_trace() && !irq_trace())
- start_critical_timing(a0, a1);
- }
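
All the hook calls added above funnel into one tracepoint: the first argument names the transition, the second flags whether a latency measurement starts (1) or stops (0), and the _rcuidle variants are the automatically generated forms that are safe where RCU is not watching (idle entry/exit paths). Reconstructed from the call sites, its declaration is roughly this sketch (the real one lives in the include/trace/events/hist.h added by this series and may print symbolic reason names instead):

/* sketch of the tracepoint the hooks above fire; assumed shape only */
TRACE_EVENT(preemptirqsoff_hist,

        TP_PROTO(int reason, int starthist),

        TP_ARGS(reason, starthist),

        TP_STRUCT__entry(
                __field(int, reason)
                __field(int, starthist)
        ),

        TP_fast_assign(
                __entry->reason    = reason;
                __entry->starthist = starthist;
        ),

        TP_printk("reason=%d starthist=%d",
                  __entry->reason, __entry->starthist)
);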
- diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
- index 3fc20422c166..65a6dde71a7d 100644
- --- a/kernel/trace/trace_output.c
- +++ b/kernel/trace/trace_output.c
- @@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
- {
- char hardsoft_irq;
- char need_resched;
- + char need_resched_lazy;
- char irqs_off;
- int hardirq;
- int softirq;
- @@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
- break;
- }
-
- + need_resched_lazy =
- + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
- +
- hardsoft_irq =
- (nmi && hardirq) ? 'Z' :
- nmi ? 'z' :
- @@ -424,14 +428,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
- softirq ? 's' :
- '.' ;
-
- - trace_seq_printf(s, "%c%c%c",
- - irqs_off, need_resched, hardsoft_irq);
- + trace_seq_printf(s, "%c%c%c%c",
- + irqs_off, need_resched, need_resched_lazy,
- + hardsoft_irq);
-
- if (entry->preempt_count)
- trace_seq_printf(s, "%x", entry->preempt_count);
- else
- trace_seq_putc(s, '.');
-
- + if (entry->preempt_lazy_count)
- + trace_seq_printf(s, "%x", entry->preempt_lazy_count);
- + else
- + trace_seq_putc(s, '.');
- +
- + if (entry->migrate_disable)
- + trace_seq_printf(s, "%x", entry->migrate_disable);
- + else
- + trace_seq_putc(s, '.');
- +
- return !trace_seq_has_overflowed(s);
- }
-
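The latency-format flag field thus grows to four characters (irqs-off, need-resched, need-resched-lazy, hardirq/softirq context) followed by three hex-or-dot columns: preempt depth, preempt-lazy depth and migrate-disable depth. A hand-constructed illustration of a resulting line (not captured output; task, CPU and timestamp are made up):

        bash-2103    1dNL.211   14us : __schedule <-schedule

Here 'd' means interrupts are off, 'N' a real reschedule request, 'L' a lazy one, '.' no hard/soft-irq context, and 2/1/1 are the preempt, preempt-lazy and migrate-disable counts.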
- diff --git a/kernel/user.c b/kernel/user.c
- index b069ccbfb0b0..1a2e88e98b5e 100644
- --- a/kernel/user.c
- +++ b/kernel/user.c
- @@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
- if (!up)
- return;
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
- free_user(up, flags);
- else
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
-
- struct user_struct *alloc_uid(kuid_t uid)
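
The _nort variants annotate sections that only need interrupts disabled on non-RT kernels: on PREEMPT_RT_FULL the uidhash_lock taken above is a sleeping lock, so interrupts must stay enabled around it. Their behavior, as recalled from the annotation patch earlier in this series (an assumption, not a verbatim quote):

/* assumed -rt semantics of the _nort helpers used above */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)     local_save_flags(flags)
# define local_irq_restore_nort(flags)  (void)(flags)
#else
# define local_irq_save_nort(flags)     local_irq_save(flags)
# define local_irq_restore_nort(flags)  local_irq_restore(flags)
#endif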
- diff --git a/kernel/watchdog.c b/kernel/watchdog.c
- index 63177be0159e..59fe007ad496 100644
- --- a/kernel/watchdog.c
- +++ b/kernel/watchdog.c
- @@ -381,6 +381,7 @@ static void watchdog_enable(unsigned int cpu)
- /* kick off the timer for the hardlockup detector */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
- + hrtimer->irqsafe = 1;
-
- /* Enable the perf event */
- watchdog_nmi_enable(cpu);
- diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
- index 12b8dd640786..4c90d2ee7433 100644
- --- a/kernel/watchdog_hld.c
- +++ b/kernel/watchdog_hld.c
- @@ -19,6 +19,7 @@
- static DEFINE_PER_CPU(bool, hard_watchdog_warn);
- static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
- static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
- +static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-
- /* boot commands */
- /*
- @@ -104,6 +105,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
- /* only print hardlockups once */
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
- + /*
- + * If early-printk is enabled then make sure we do not
- + * lock up in printk() and kill console logging:
- + */
- + printk_kill();
- +
- + raw_spin_lock(&watchdog_output_lock);
-
- pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
- print_modules();
- @@ -121,6 +129,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
- !test_and_set_bit(0, &hardlockup_allcpu_dumped))
- trigger_allbutself_cpu_backtrace();
-
- + raw_spin_unlock(&watchdog_output_lock);
- if (hardlockup_panic)
- nmi_panic(regs, "Hard LOCKUP");
-
- diff --git a/kernel/workqueue.c b/kernel/workqueue.c
- index 181c2ad0cb54..7eed129f114a 100644
- --- a/kernel/workqueue.c
- +++ b/kernel/workqueue.c
- @@ -48,6 +48,8 @@
- #include <linux/nodemask.h>
- #include <linux/moduleparam.h>
- #include <linux/uaccess.h>
- +#include <linux/locallock.h>
- +#include <linux/delay.h>
-
- #include "workqueue_internal.h"
-
- @@ -122,11 +124,16 @@ enum {
- * cpu or grabbing pool->lock is enough for read access. If
- * POOL_DISASSOCIATED is set, it's identical to L.
- *
- + * On RT we need the extra protection via rt_lock_idle_list() for
- + * the list manipulations against read access from
- + * wq_worker_sleeping(). All other places are nicely serialized via
- + * pool->lock.
- + *
- * A: pool->attach_mutex protected.
- *
- * PL: wq_pool_mutex protected.
- *
- - * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
- + * PR: wq_pool_mutex protected for writes. RCU protected for reads.
- *
- * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
- *
- @@ -135,7 +142,7 @@ enum {
- *
- * WQ: wq->mutex protected.
- *
- - * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
- + * WR: wq->mutex protected for writes. RCU protected for reads.
- *
- * MD: wq_mayday_lock protected.
- */
- @@ -185,7 +192,7 @@ struct worker_pool {
- atomic_t nr_running ____cacheline_aligned_in_smp;
-
- /*
- - * Destruction of pool is sched-RCU protected to allow dereferences
- + * Destruction of pool is RCU protected to allow dereferences
- * from get_work_pool().
- */
- struct rcu_head rcu;
- @@ -214,7 +221,7 @@ struct pool_workqueue {
- /*
- * Release of unbound pwq is punted to system_wq. See put_pwq()
- * and pwq_unbound_release_workfn() for details. pool_workqueue
- - * itself is also sched-RCU protected so that the first pwq can be
- + * itself is also RCU protected so that the first pwq can be
- * determined without grabbing wq->mutex.
- */
- struct work_struct unbound_release_work;
- @@ -349,6 +356,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
- struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
- EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
-
- +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
- +
- static int worker_thread(void *__worker);
- static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-
- @@ -356,20 +365,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
- #include <trace/events/workqueue.h>
-
- #define assert_rcu_or_pool_mutex() \
- - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq_pool_mutex), \
- - "sched RCU or wq_pool_mutex should be held")
- + "RCU or wq_pool_mutex should be held")
-
- #define assert_rcu_or_wq_mutex(wq) \
- - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq->mutex), \
- - "sched RCU or wq->mutex should be held")
- + "RCU or wq->mutex should be held")
-
- #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
- - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
- + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&wq->mutex) && \
- !lockdep_is_held(&wq_pool_mutex), \
- - "sched RCU, wq->mutex or wq_pool_mutex should be held")
- + "RCU, wq->mutex or wq_pool_mutex should be held")
-
- #define for_each_cpu_worker_pool(pool, cpu) \
- for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
- @@ -381,7 +390,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
- * @pool: iteration cursor
- * @pi: integer used for iteration
- *
- - * This must be called either with wq_pool_mutex held or sched RCU read
- + * This must be called either with wq_pool_mutex held or RCU read
- * locked. If the pool needs to be used beyond the locking in effect, the
- * caller is responsible for guaranteeing that the pool stays online.
- *
- @@ -413,7 +422,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
- * @pwq: iteration cursor
- * @wq: the target workqueue
- *
- - * This must be called either with wq->mutex held or sched RCU read locked.
- + * This must be called either with wq->mutex held or RCU read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
- *
- @@ -425,6 +434,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
- if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
- else
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +static inline void rt_lock_idle_list(struct worker_pool *pool)
- +{
- + preempt_disable();
- +}
- +static inline void rt_unlock_idle_list(struct worker_pool *pool)
- +{
- + preempt_enable();
- +}
- +static inline void sched_lock_idle_list(struct worker_pool *pool) { }
- +static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
- +#else
- +static inline void rt_lock_idle_list(struct worker_pool *pool) { }
- +static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
- +static inline void sched_lock_idle_list(struct worker_pool *pool)
- +{
- + spin_lock_irq(&pool->lock);
- +}
- +static inline void sched_unlock_idle_list(struct worker_pool *pool)
- +{
- + spin_unlock_irq(&pool->lock);
- +}
- +#endif
- +
- +
- #ifdef CONFIG_DEBUG_OBJECTS_WORK
-
- static struct debug_obj_descr work_debug_descr;
- @@ -549,7 +583,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
- * @wq: the target workqueue
- * @node: the node ID
- *
- - * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
- + * This must be called with any of wq_pool_mutex, wq->mutex or RCU
- * read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
- @@ -693,8 +727,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
- * @work: the work item of interest
- *
- * Pools are created and destroyed under wq_pool_mutex, and allow read
- - * access under sched-RCU read lock. As such, this function should be
- - * called under wq_pool_mutex or with preemption disabled.
- + * access under RCU read lock. As such, this function should be
- + * called under wq_pool_mutex or inside of a rcu_read_lock() region.
- *
- * All fields of the returned pool are accessible as long as the above
- * mentioned locking is in effect. If the returned pool needs to be used
- @@ -831,50 +865,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
- */
- static void wake_up_worker(struct worker_pool *pool)
- {
- - struct worker *worker = first_idle_worker(pool);
- + struct worker *worker;
- +
- + rt_lock_idle_list(pool);
- +
- + worker = first_idle_worker(pool);
-
- if (likely(worker))
- wake_up_process(worker->task);
- +
- + rt_unlock_idle_list(pool);
- }
-
- /**
- - * wq_worker_waking_up - a worker is waking up
- + * wq_worker_running - a worker is running again
- * @task: task waking up
- - * @cpu: CPU @task is waking up to
- *
- - * This function is called during try_to_wake_up() when a worker is
- - * being awoken.
- - *
- - * CONTEXT:
- - * spin_lock_irq(rq->lock)
- + * This function is called when a worker returns from schedule()
- */
- -void wq_worker_waking_up(struct task_struct *task, int cpu)
- +void wq_worker_running(struct task_struct *task)
- {
- struct worker *worker = kthread_data(task);
-
- - if (!(worker->flags & WORKER_NOT_RUNNING)) {
- - WARN_ON_ONCE(worker->pool->cpu != cpu);
- + if (!worker->sleeping)
- + return;
- + if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(&worker->pool->nr_running);
- - }
- + worker->sleeping = 0;
- }
-
- /**
- * wq_worker_sleeping - a worker is going to sleep
- * @task: task going to sleep
- *
- - * This function is called during schedule() when a busy worker is
- - * going to sleep. Worker on the same cpu can be woken up by
- - * returning pointer to its task.
- - *
- - * CONTEXT:
- - * spin_lock_irq(rq->lock)
- - *
- - * Return:
- - * Worker task on @cpu to wake up, %NULL if none.
- + * This function is called from schedule() when a busy worker is
- + * going to sleep.
- */
- -struct task_struct *wq_worker_sleeping(struct task_struct *task)
- +void wq_worker_sleeping(struct task_struct *task)
- {
- - struct worker *worker = kthread_data(task), *to_wakeup = NULL;
- + struct worker *worker = kthread_data(task);
- struct worker_pool *pool;
-
- /*
- @@ -883,29 +912,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
- * checking NOT_RUNNING.
- */
- if (worker->flags & WORKER_NOT_RUNNING)
- - return NULL;
- + return;
-
- pool = worker->pool;
-
- - /* this can only happen on the local cpu */
- - if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
- - return NULL;
- + if (WARN_ON_ONCE(worker->sleeping))
- + return;
- +
- + worker->sleeping = 1;
-
- /*
- * The counterpart of the following dec_and_test, implied mb,
- * worklist not empty test sequence is in insert_work().
- * Please read comment there.
- - *
- - * NOT_RUNNING is clear. This means that we're bound to and
- - * running on the local cpu w/ rq lock held and preemption
- - * disabled, which in turn means that none else could be
- - * manipulating idle_list, so dereferencing idle_list without pool
- - * lock is safe.
- */
- if (atomic_dec_and_test(&pool->nr_running) &&
- - !list_empty(&pool->worklist))
- - to_wakeup = first_idle_worker(pool);
- - return to_wakeup ? to_wakeup->task : NULL;
- + !list_empty(&pool->worklist)) {
- + sched_lock_idle_list(pool);
- + wake_up_worker(pool);
- + sched_unlock_idle_list(pool);
- + }
- }
-
- /**
- @@ -1099,12 +1125,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
- {
- if (pwq) {
- /*
- - * As both pwqs and pools are sched-RCU protected, the
- + * As both pwqs and pools are RCU protected, the
- * following lock operations are safe.
- */
- - spin_lock_irq(&pwq->pool->lock);
- + rcu_read_lock();
- + local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
- put_pwq(pwq);
- - spin_unlock_irq(&pwq->pool->lock);
- + local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
- + rcu_read_unlock();
- }
- }
-
- @@ -1208,7 +1236,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
- struct worker_pool *pool;
- struct pool_workqueue *pwq;
-
- - local_irq_save(*flags);
- + local_lock_irqsave(pendingb_lock, *flags);
-
- /* try to steal the timer if it exists */
- if (is_dwork) {
- @@ -1227,6 +1255,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
- return 0;
-
- + rcu_read_lock();
- /*
- * The queueing is in progress, or it is already queued. Try to
- * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
- @@ -1265,14 +1294,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
- set_work_pool_and_keep_pending(work, pool->id);
-
- spin_unlock(&pool->lock);
- + rcu_read_unlock();
- return 1;
- }
- spin_unlock(&pool->lock);
- fail:
- - local_irq_restore(*flags);
- + rcu_read_unlock();
- + local_unlock_irqrestore(pendingb_lock, *flags);
- if (work_is_canceling(work))
- return -ENOENT;
- - cpu_relax();
- + cpu_chill();
- return -EAGAIN;
- }
-
- @@ -1374,7 +1405,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
- * queued or lose PENDING. Grabbing PENDING and queueing should
- * happen with IRQ disabled.
- */
- - WARN_ON_ONCE(!irqs_disabled());
- + WARN_ON_ONCE_NONRT(!irqs_disabled());
-
- debug_work_activate(work);
-
- @@ -1382,6 +1413,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
- if (unlikely(wq->flags & __WQ_DRAINING) &&
- WARN_ON_ONCE(!is_chained_work(wq)))
- return;
- + rcu_read_lock();
- retry:
- if (req_cpu == WORK_CPU_UNBOUND)
- cpu = wq_select_unbound_cpu(raw_smp_processor_id());
- @@ -1438,10 +1470,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
- /* pwq determined, queue */
- trace_workqueue_queue_work(req_cpu, pwq, work);
-
- - if (WARN_ON(!list_empty(&work->entry))) {
- - spin_unlock(&pwq->pool->lock);
- - return;
- - }
- + if (WARN_ON(!list_empty(&work->entry)))
- + goto out;
-
- pwq->nr_in_flight[pwq->work_color]++;
- work_flags = work_color_to_flags(pwq->work_color);
- @@ -1459,7 +1489,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
-
- insert_work(pwq, work, worklist, work_flags);
-
- +out:
- spin_unlock(&pwq->pool->lock);
- + rcu_read_unlock();
- }
-
- /**
- @@ -1479,14 +1511,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
- bool ret = false;
- unsigned long flags;
-
- - local_irq_save(flags);
- + local_lock_irqsave(pendingb_lock, flags);
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_work(cpu, wq, work);
- ret = true;
- }
-
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_work_on);
- @@ -1554,14 +1586,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- unsigned long flags;
-
- /* read the comment in __queue_work() */
- - local_irq_save(flags);
- + local_lock_irqsave(pendingb_lock, flags);
-
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_delayed_work(cpu, wq, dwork, delay);
- ret = true;
- }
-
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(queue_delayed_work_on);
- @@ -1596,7 +1628,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
-
- if (likely(ret >= 0)) {
- __queue_delayed_work(cpu, wq, dwork, delay);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pendingb_lock, flags);
- }
-
- /* -ENOENT from try_to_grab_pending() becomes %true */
- @@ -1629,7 +1661,9 @@ static void worker_enter_idle(struct worker *worker)
- worker->last_active = jiffies;
-
- /* idle_list is LIFO */
- + rt_lock_idle_list(pool);
- list_add(&worker->entry, &pool->idle_list);
- + rt_unlock_idle_list(pool);
-
- if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
- mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
- @@ -1662,7 +1696,9 @@ static void worker_leave_idle(struct worker *worker)
- return;
- worker_clr_flags(worker, WORKER_IDLE);
- pool->nr_idle--;
- + rt_lock_idle_list(pool);
- list_del_init(&worker->entry);
- + rt_unlock_idle_list(pool);
- }
-
- static struct worker *alloc_worker(int node)
- @@ -1828,7 +1864,9 @@ static void destroy_worker(struct worker *worker)
- pool->nr_workers--;
- pool->nr_idle--;
-
- + rt_lock_idle_list(pool);
- list_del_init(&worker->entry);
- + rt_unlock_idle_list(pool);
- worker->flags |= WORKER_DIE;
- wake_up_process(worker->task);
- }
- @@ -2780,14 +2818,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
-
- might_sleep();
-
- - local_irq_disable();
- + rcu_read_lock();
- pool = get_work_pool(work);
- if (!pool) {
- - local_irq_enable();
- + rcu_read_unlock();
- return false;
- }
-
- - spin_lock(&pool->lock);
- + spin_lock_irq(&pool->lock);
- /* see the comment in try_to_grab_pending() with the same code */
- pwq = get_work_pwq(work);
- if (pwq) {
- @@ -2816,10 +2854,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
- else
- lock_map_acquire_read(&pwq->wq->lockdep_map);
- lock_map_release(&pwq->wq->lockdep_map);
- -
- + rcu_read_unlock();
- return true;
- already_gone:
- spin_unlock_irq(&pool->lock);
- + rcu_read_unlock();
- return false;
- }
-
- @@ -2906,7 +2945,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
-
- /* tell other tasks trying to grab @work to back off */
- mark_work_canceling(work);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pendingb_lock, flags);
-
- flush_work(work);
- clear_work_data(work);
- @@ -2961,10 +3000,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
- */
- bool flush_delayed_work(struct delayed_work *dwork)
- {
- - local_irq_disable();
- + local_lock_irq(pendingb_lock);
- if (del_timer_sync(&dwork->timer))
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
- - local_irq_enable();
- + local_unlock_irq(pendingb_lock);
- return flush_work(&dwork->work);
- }
- EXPORT_SYMBOL(flush_delayed_work);
- @@ -2982,7 +3021,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
- return false;
-
- set_work_pool_and_clear_pending(work, get_work_pool_id(work));
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pendingb_lock, flags);
- return ret;
- }
-
- @@ -3239,7 +3278,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
- * put_unbound_pool - put a worker_pool
- * @pool: worker_pool to put
- *
- - * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
- + * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
- * safe manner. get_unbound_pool() calls this function on its failure path
- * and this function should be able to release pools which went through,
- * successfully or not, init_worker_pool().
- @@ -3293,8 +3332,8 @@ static void put_unbound_pool(struct worker_pool *pool)
- del_timer_sync(&pool->idle_timer);
- del_timer_sync(&pool->mayday_timer);
-
- - /* sched-RCU protected to allow dereferences from get_work_pool() */
- - call_rcu_sched(&pool->rcu, rcu_free_pool);
- + /* RCU protected to allow dereferences from get_work_pool() */
- + call_rcu(&pool->rcu, rcu_free_pool);
- }
-
- /**
- @@ -3401,14 +3440,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
- put_unbound_pool(pool);
- mutex_unlock(&wq_pool_mutex);
-
- - call_rcu_sched(&pwq->rcu, rcu_free_pwq);
- + call_rcu(&pwq->rcu, rcu_free_pwq);
-
- /*
- * If we're the last pwq going away, @wq is already dead and no one
- * is gonna access it anymore. Schedule RCU free.
- */
- if (is_last)
- - call_rcu_sched(&wq->rcu, rcu_free_wq);
- + call_rcu(&wq->rcu, rcu_free_wq);
- }
-
- /**
- @@ -4072,7 +4111,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
- * The base ref is never dropped on per-cpu pwqs. Directly
- * schedule RCU free.
- */
- - call_rcu_sched(&wq->rcu, rcu_free_wq);
- + call_rcu(&wq->rcu, rcu_free_wq);
- } else {
- /*
- * We're the sole accessor of @wq at this point. Directly
- @@ -4166,7 +4205,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
- struct pool_workqueue *pwq;
- bool ret;
-
- - rcu_read_lock_sched();
- + rcu_read_lock();
- + preempt_disable();
-
- if (cpu == WORK_CPU_UNBOUND)
- cpu = smp_processor_id();
- @@ -4177,7 +4217,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
-
- ret = !list_empty(&pwq->delayed_works);
- - rcu_read_unlock_sched();
- + preempt_enable();
- + rcu_read_unlock();
-
- return ret;
- }
- @@ -4203,15 +4244,15 @@ unsigned int work_busy(struct work_struct *work)
- if (work_pending(work))
- ret |= WORK_BUSY_PENDING;
-
- - local_irq_save(flags);
- + rcu_read_lock();
- pool = get_work_pool(work);
- if (pool) {
- - spin_lock(&pool->lock);
- + spin_lock_irqsave(&pool->lock, flags);
- if (find_worker_executing_work(pool, work))
- ret |= WORK_BUSY_RUNNING;
- - spin_unlock(&pool->lock);
- + spin_unlock_irqrestore(&pool->lock, flags);
- }
- - local_irq_restore(flags);
- + rcu_read_unlock();
-
- return ret;
- }
- @@ -4400,7 +4441,7 @@ void show_workqueue_state(void)
- unsigned long flags;
- int pi;
-
- - rcu_read_lock_sched();
- + rcu_read_lock();
-
- pr_info("Showing busy workqueues and worker pools:\n");
-
- @@ -4453,7 +4494,7 @@ void show_workqueue_state(void)
- spin_unlock_irqrestore(&pool->lock, flags);
- }
-
- - rcu_read_unlock_sched();
- + rcu_read_unlock();
- }
-
- /*
- @@ -4791,16 +4832,16 @@ bool freeze_workqueues_busy(void)
- * nr_active is monotonically decreasing. It's safe
- * to peek without lock.
- */
- - rcu_read_lock_sched();
- + rcu_read_lock();
- for_each_pwq(pwq, wq) {
- WARN_ON_ONCE(pwq->nr_active < 0);
- if (pwq->nr_active) {
- busy = true;
- - rcu_read_unlock_sched();
- + rcu_read_unlock();
- goto out_unlock;
- }
- }
- - rcu_read_unlock_sched();
- + rcu_read_unlock();
- }
- out_unlock:
- mutex_unlock(&wq_pool_mutex);
- @@ -4990,7 +5031,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
- const char *delim = "";
- int node, written = 0;
-
- - rcu_read_lock_sched();
- + get_online_cpus();
- + rcu_read_lock();
- for_each_node(node) {
- written += scnprintf(buf + written, PAGE_SIZE - written,
- "%s%d:%d", delim, node,
- @@ -4998,7 +5040,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
- delim = " ";
- }
- written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
- - rcu_read_unlock_sched();
- + rcu_read_unlock();
- + put_online_cpus();
-
- return written;
- }
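
pendingb_lock is a local lock, so the conversions above preserve the non-RT behavior exactly: with PREEMPT_RT disabled, local_lock_irqsave()/local_unlock_irqrestore() compile down to local_irq_save()/local_irq_restore(), while on RT they take a per-CPU sleeping spinlock and leave interrupts enabled. A deliberately simplified sketch of that split (the real primitives in this series' include/linux/locallock.h also track owner and nesting):

/* simplified sketch of the locallock split; not the literal implementation */
#ifdef CONFIG_PREEMPT_RT_BASE
# define local_lock_irqsave(lvar, _flags) \
        do { (void)(_flags); local_lock(lvar); } while (0)
# define local_unlock_irqrestore(lvar, _flags) \
        do { (void)(_flags); local_unlock(lvar); } while (0)
#else
# define local_lock_irqsave(lvar, _flags)       local_irq_save(_flags)
# define local_unlock_irqrestore(lvar, _flags)  local_irq_restore(_flags)
#endif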
- diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
- index 29fa81f0f51a..42d1e3974554 100644
- --- a/kernel/workqueue_internal.h
- +++ b/kernel/workqueue_internal.h
- @@ -44,6 +44,7 @@ struct worker {
- unsigned long last_active; /* L: last active timestamp */
- unsigned int flags; /* X: flags */
- int id; /* I: worker id */
- + int sleeping; /* None */
-
- /*
- * Opaque string set with work_set_desc(). Printed out with task
- @@ -69,7 +70,7 @@ static inline struct worker *current_wq_worker(void)
- * Scheduler hooks for concurrency managed workqueue. Only to be used from
- * sched/core.c and workqueue.c.
- */
- -void wq_worker_waking_up(struct task_struct *task, int cpu);
- -struct task_struct *wq_worker_sleeping(struct task_struct *task);
- +void wq_worker_running(struct task_struct *task);
- +void wq_worker_sleeping(struct task_struct *task);
-
- #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
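
Because wq_worker_sleeping() no longer returns a task to wake and no longer depends on the rq lock, the scheduler can call it before taking its own locks, and wq_worker_running() replaces the old try_to_wake_up()-time hook. The expected call sites in kernel/sched/core.c then look roughly like this (a simplified sketch under this series, not the exact code):

/* sketch of the expected -rt scheduler glue; simplified */
static inline void sched_submit_work(struct task_struct *tsk)
{
        if (!tsk->state)
                return;
        /* a worker going to sleep may need a replacement woken up */
        if (tsk->flags & PF_WQ_WORKER)
                wq_worker_sleeping(tsk);
        if (blk_needs_flush_plug(tsk))
                blk_schedule_flush_plug(tsk);
}

static inline void sched_update_worker(struct task_struct *tsk)
{
        /* called once schedule() returns */
        if (tsk->flags & PF_WQ_WORKER)
                wq_worker_running(tsk);
}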
- diff --git a/lib/Kconfig b/lib/Kconfig
- index 260a80e313b9..b06becb3f477 100644
- --- a/lib/Kconfig
- +++ b/lib/Kconfig
- @@ -400,6 +400,7 @@ config CHECK_SIGNATURE
-
- config CPUMASK_OFFSTACK
- bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
- + depends on !PREEMPT_RT_FULL
- help
- Use dynamic allocation for cpumask_var_t, instead of putting
- them on the stack. This is a bit more expensive, but avoids
- diff --git a/lib/debugobjects.c b/lib/debugobjects.c
- index 056052dc8e91..d8494e126de8 100644
- --- a/lib/debugobjects.c
- +++ b/lib/debugobjects.c
- @@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
- struct debug_obj *obj;
- unsigned long flags;
-
- - fill_pool();
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (preempt_count() == 0 && !irqs_disabled())
- +#endif
- + fill_pool();
-
- db = get_bucket((unsigned long) addr);
-
- diff --git a/lib/idr.c b/lib/idr.c
- index 6098336df267..9decbe914595 100644
- --- a/lib/idr.c
- +++ b/lib/idr.c
- @@ -30,6 +30,7 @@
- #include <linux/idr.h>
- #include <linux/spinlock.h>
- #include <linux/percpu.h>
- +#include <linux/locallock.h>
-
- #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
- #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
- @@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
- static DEFINE_PER_CPU(int, idr_preload_cnt);
- static DEFINE_SPINLOCK(simple_ida_lock);
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
- +
- +static inline void idr_preload_lock(void)
- +{
- + local_lock(idr_lock);
- +}
- +
- +static inline void idr_preload_unlock(void)
- +{
- + local_unlock(idr_lock);
- +}
- +
- +void idr_preload_end(void)
- +{
- + idr_preload_unlock();
- +}
- +EXPORT_SYMBOL(idr_preload_end);
- +#else
- +static inline void idr_preload_lock(void)
- +{
- + preempt_disable();
- +}
- +
- +static inline void idr_preload_unlock(void)
- +{
- + preempt_enable();
- +}
- +#endif
- +
- +
- /* the maximum ID which can be allocated given idr->layers */
- static int idr_max(int layers)
- {
- @@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
- * context. See idr_preload() for details.
- */
- if (!in_interrupt()) {
- - preempt_disable();
- + idr_preload_lock();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
- }
- - preempt_enable();
- + idr_preload_unlock();
- if (new)
- return new;
- }
- @@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
- idr_mark_full(pa, id);
- }
-
- -
- /**
- * idr_preload - preload for idr_alloc()
- * @gfp_mask: allocation mask to use for preloading
- @@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
- WARN_ON_ONCE(in_interrupt());
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
-
- - preempt_disable();
- + idr_preload_lock();
-
- /*
- * idr_alloc() is likely to succeed w/o full idr_layer buffer and
- @@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
- while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
- struct idr_layer *new;
-
- - preempt_enable();
- + idr_preload_unlock();
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- - preempt_disable();
- + idr_preload_lock();
- if (!new)
- break;
-
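The caller contract is unchanged: idr_preload() returns with the preload protection held and idr_preload_end() drops it. Upstream, idr_preload_end() is a static-inline preempt_enable(), which is why the RT side must provide the exported out-of-line version above that releases idr_lock instead. The standard usage pattern this must keep working (my_alloc_id and its arguments are illustrative names):

/* illustrative caller; only the preload/end bracket matters here */
static int my_alloc_id(struct idr *idr, spinlock_t *lock, void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);        /* on RT: local_lock(idr_lock) */
        spin_lock(lock);
        id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(lock);
        idr_preload_end();              /* on RT: local_unlock(idr_lock) */

        return id;
}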
- diff --git a/lib/irq_poll.c b/lib/irq_poll.c
- index 1d6565e81030..b23a79761df7 100644
- --- a/lib/irq_poll.c
- +++ b/lib/irq_poll.c
- @@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop)
- list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(irq_poll_sched);
-
- @@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop)
- local_irq_save(flags);
- __irq_poll_complete(iop);
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(irq_poll_complete);
-
- @@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
- }
-
- local_irq_enable();
- + preempt_check_resched_rt();
-
- /* Even though interrupts have been re-enabled, this
- * access is safe because interrupts can only add new
- @@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
-
- local_irq_enable();
- + preempt_check_resched_rt();
- }
-
- /**
- @@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
- this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
- local_irq_enable();
- + preempt_check_resched_rt();
-
- return 0;
- }
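
Each place that re-enables interrupts after raising IRQ_POLL_SOFTIRQ gains a preempt_check_resched_rt(): on RT the softirq runs in a thread rather than on hardirq exit, so the raise may have set need-resched with nothing else around to act on it, while on non-RT the macro is a no-op. As recalled from the preempt.h changes earlier in this series (treat as an assumption):

/* assumed definition from the -rt preempt.h changes */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_check_resched_rt()     preempt_check_resched()
#else
# define preempt_check_resched_rt()     barrier()
#endif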
- diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
- index f3a217ea0388..4611b156ef79 100644
- --- a/lib/locking-selftest.c
- +++ b/lib/locking-selftest.c
- @@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem)
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-
- @@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
-
- +#endif
- +
- #undef E1
- #undef E2
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * Enabling hardirqs with a softirq-safe lock held:
- */
- @@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
- #undef E1
- #undef E2
-
- +#endif
- +
- /*
- * Enabling irqs with an irq-safe lock held:
- */
- @@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-
- @@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
-
- +#endif
- +
- #undef E1
- #undef E2
-
- @@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-
- @@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
-
- +#endif
- +
- #undef E1
- #undef E2
- #undef E3
- @@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-
- @@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
-
- +#endif
- +
- #undef E1
- #undef E2
- #undef E3
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- /*
- * read-lock / write-lock irq inversion.
- *
- @@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
- #undef E2
- #undef E3
-
- +#endif
- +
- +#ifndef CONFIG_PREEMPT_RT_FULL
- +
- /*
- * read-lock / write-lock recursion that is actually safe.
- */
- @@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
- #undef E2
- #undef E3
-
- +#endif
- +
- /*
- * read-lock / write-lock recursion that is unsafe.
- */
- @@ -1858,6 +1885,7 @@ void locking_selftest(void)
-
- printk(" --------------------------------------------------------------------------\n");
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * irq-context testcases:
- */
- @@ -1870,6 +1898,28 @@ void locking_selftest(void)
-
- DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
- // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
- +#else
- + /* On -rt, we only do hardirq context test for raw spinlock */
- + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
- + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
- +
- + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
- + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
- +
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
- +
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
- + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
- +#endif
-
- ww_tests();
-
- diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
- index 6d40944960de..822a2c027e72 100644
- --- a/lib/percpu_ida.c
- +++ b/lib/percpu_ida.c
- @@ -26,6 +26,9 @@
- #include <linux/string.h>
- #include <linux/spinlock.h>
- #include <linux/percpu_ida.h>
- +#include <linux/locallock.h>
- +
- +static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock);
-
- struct percpu_ida_cpu {
- /*
- @@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
- unsigned long flags;
- int tag;
-
- - local_irq_save(flags);
- + local_lock_irqsave(irq_off_lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- /* Fastpath */
- tag = alloc_local_tag(tags);
- if (likely(tag >= 0)) {
- - local_irq_restore(flags);
- + local_unlock_irqrestore(irq_off_lock, flags);
- return tag;
- }
-
- @@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
-
- if (!tags->nr_free)
- alloc_global_tags(pool, tags);
- +
- if (!tags->nr_free)
- steal_tags(pool, tags);
-
- @@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
- }
-
- spin_unlock(&pool->lock);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(irq_off_lock, flags);
-
- if (tag >= 0 || state == TASK_RUNNING)
- break;
- @@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
-
- schedule();
-
- - local_irq_save(flags);
- + local_lock_irqsave(irq_off_lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
- }
- if (state != TASK_RUNNING)
- @@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-
- BUG_ON(tag >= pool->nr_tags);
-
- - local_irq_save(flags);
- + local_lock_irqsave(irq_off_lock, flags);
- tags = this_cpu_ptr(pool->tag_cpu);
-
- spin_lock(&tags->lock);
- @@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
- spin_unlock(&pool->lock);
- }
-
- - local_irq_restore(flags);
- + local_unlock_irqrestore(irq_off_lock, flags);
- }
- EXPORT_SYMBOL_GPL(percpu_ida_free);
-
- @@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- struct percpu_ida_cpu *remote;
- unsigned cpu, i, err = 0;
-
- - local_irq_save(flags);
- + local_lock_irqsave(irq_off_lock, flags);
- for_each_possible_cpu(cpu) {
- remote = per_cpu_ptr(pool->tag_cpu, cpu);
- spin_lock(&remote->lock);
- @@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- }
- spin_unlock(&pool->lock);
- out:
- - local_irq_restore(flags);
- + local_unlock_irqrestore(irq_off_lock, flags);
- return err;
- }
- EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
- diff --git a/lib/radix-tree.c b/lib/radix-tree.c
- index 8e6d552c40dd..741da5a77fd5 100644
- --- a/lib/radix-tree.c
- +++ b/lib/radix-tree.c
- @@ -36,7 +36,7 @@
- #include <linux/bitops.h>
- #include <linux/rcupdate.h>
- #include <linux/preempt.h> /* in_interrupt() */
- -
- +#include <linux/locallock.h>
-
- /* Number of nodes in fully populated tree of given height */
- static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
- @@ -68,6 +68,7 @@ struct radix_tree_preload {
- struct radix_tree_node *nodes;
- };
- static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
- +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
-
- static inline void *node_to_entry(void *ptr)
- {
- @@ -290,13 +291,14 @@ radix_tree_node_alloc(struct radix_tree_root *root)
- * succeed in getting a node here (and never reach
- * kmem_cache_alloc)
- */
- - rtp = this_cpu_ptr(&radix_tree_preloads);
- + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
- if (rtp->nr) {
- ret = rtp->nodes;
- rtp->nodes = ret->private_data;
- ret->private_data = NULL;
- rtp->nr--;
- }
- + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
- /*
- * Update the allocation stack trace as this is more useful
- * for debugging.
- @@ -357,14 +359,14 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
- */
- gfp_mask &= ~__GFP_ACCOUNT;
-
- - preempt_disable();
- + local_lock(radix_tree_preloads_lock);
- rtp = this_cpu_ptr(&radix_tree_preloads);
- while (rtp->nr < nr) {
- - preempt_enable();
- + local_unlock(radix_tree_preloads_lock);
- node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
- if (node == NULL)
- goto out;
- - preempt_disable();
- + local_lock(radix_tree_preloads_lock);
- rtp = this_cpu_ptr(&radix_tree_preloads);
- if (rtp->nr < nr) {
- node->private_data = rtp->nodes;
- @@ -406,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
- if (gfpflags_allow_blocking(gfp_mask))
- return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
- /* Preloading doesn't help anything with this gfp mask, skip it */
- - preempt_disable();
- + local_lock(radix_tree_preloads_lock);
- return 0;
- }
- EXPORT_SYMBOL(radix_tree_maybe_preload);
- @@ -422,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
-
- /* Preloading doesn't help anything with this gfp mask, skip it */
- if (!gfpflags_allow_blocking(gfp_mask)) {
- - preempt_disable();
- + local_lock(radix_tree_preloads_lock);
- return 0;
- }
-
- @@ -456,6 +458,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
- return __radix_tree_preload(gfp_mask, nr_nodes);
- }
-
- +void radix_tree_preload_end(void)
- +{
- + local_unlock(radix_tree_preloads_lock);
- +}
- +EXPORT_SYMBOL(radix_tree_preload_end);
- +
- /*
- * The maximum index which can be stored in a radix tree
- */
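
As with idr above, radix_tree_preload() must return with the protection held, so radix_tree_preload_end() grows an exported out-of-line body here (upstream it is a static-inline preempt_enable()). The usage pattern being preserved (illustrative names; note that on preload failure the protection is not held, so the _end() call must be skipped):

/* illustrative caller of the preload bracket; names are made up */
static int my_insert(struct radix_tree_root *root, unsigned long index,
                     void *item, spinlock_t *lock)
{
        int err = radix_tree_preload(GFP_KERNEL);

        if (err)
                return err;     /* protection not held on failure */
        spin_lock(lock);
        err = radix_tree_insert(root, index, item);
        spin_unlock(lock);
        radix_tree_preload_end();       /* on RT: drops radix_tree_preloads_lock */

        return err;
}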
- diff --git a/lib/scatterlist.c b/lib/scatterlist.c
- index 004fc70fc56a..ccc46992a517 100644
- --- a/lib/scatterlist.c
- +++ b/lib/scatterlist.c
- @@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
- flush_kernel_dcache_page(miter->page);
-
- if (miter->__flags & SG_MITER_ATOMIC) {
- - WARN_ON_ONCE(preemptible());
- + WARN_ON_ONCE(!pagefault_disabled());
- kunmap_atomic(miter->addr);
- } else
- kunmap(miter->page);
- @@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
- if (!sg_miter_skip(&miter, skip))
- return false;
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
-
- while (sg_miter_next(&miter) && offset < buflen) {
- unsigned int len;
- @@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
-
- sg_miter_stop(&miter);
-
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- return offset;
- }
- EXPORT_SYMBOL(sg_copy_buffer);
- diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
- index 1afec32de6f2..11fa431046a8 100644
- --- a/lib/smp_processor_id.c
- +++ b/lib/smp_processor_id.c
- @@ -39,8 +39,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
- if (!printk_ratelimit())
- goto out_enable;
-
- - printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
- - what1, what2, preempt_count() - 1, current->comm, current->pid);
- + printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
- + what1, what2, preempt_count() - 1, __migrate_disabled(current),
- + current->comm, current->pid);
-
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- dump_stack();
- diff --git a/mm/Kconfig b/mm/Kconfig
- index 86e3e0e74d20..77e5862a1ed2 100644
- --- a/mm/Kconfig
- +++ b/mm/Kconfig
- @@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
-
- config TRANSPARENT_HUGEPAGE
- bool "Transparent Hugepage Support"
- - depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
- + depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
- select COMPACTION
- select RADIX_TREE_MULTIORDER
- help
- diff --git a/mm/backing-dev.c b/mm/backing-dev.c
- index 6ff2d7744223..b5a91dd53b5f 100644
- --- a/mm/backing-dev.c
- +++ b/mm/backing-dev.c
- @@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
- {
- unsigned long flags;
-
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- return;
- }
-
- diff --git a/mm/compaction.c b/mm/compaction.c
- index 70e6bec46dc2..6678ed58b7c6 100644
- --- a/mm/compaction.c
- +++ b/mm/compaction.c
- @@ -1593,10 +1593,12 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
- block_start_pfn(cc->migrate_pfn, cc->order);
-
- if (cc->last_migrated_pfn < current_block_start) {
- - cpu = get_cpu();
- + cpu = get_cpu_light();
- + local_lock_irq(swapvec_lock);
- lru_add_drain_cpu(cpu);
- + local_unlock_irq(swapvec_lock);
- drain_local_pages(zone);
- - put_cpu();
- + put_cpu_light();
- /* No more flushing until we migrate again */
- cc->last_migrated_pfn = 0;
- }
- diff --git a/mm/filemap.c b/mm/filemap.c
- index edfb90e3830c..a8d2c7a73d54 100644
- --- a/mm/filemap.c
- +++ b/mm/filemap.c
- @@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
- * node->private_list is protected by
- * mapping->tree_lock.
- */
- - if (!list_empty(&node->private_list))
- - list_lru_del(&workingset_shadow_nodes,
- + if (!list_empty(&node->private_list)) {
- + local_lock(workingset_shadow_lock);
- + list_lru_del(&__workingset_shadow_nodes,
- &node->private_list);
- + local_unlock(workingset_shadow_lock);
- + }
- }
- return 0;
- }
- @@ -217,8 +220,10 @@ static void page_cache_tree_delete(struct address_space *mapping,
- if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
- list_empty(&node->private_list)) {
- node->private_data = mapping;
- - list_lru_add(&workingset_shadow_nodes,
- - &node->private_list);
- + local_lock(workingset_shadow_lock);
- + list_lru_add(&__workingset_shadow_nodes,
- + &node->private_list);
- + local_unlock(workingset_shadow_lock);
- }
- }
-
- diff --git a/mm/highmem.c b/mm/highmem.c
- index 50b4ca6787f0..77518a3b35a1 100644
- --- a/mm/highmem.c
- +++ b/mm/highmem.c
- @@ -29,10 +29,11 @@
- #include <linux/kgdb.h>
- #include <asm/tlbflush.h>
-
- -
- +#ifndef CONFIG_PREEMPT_RT_FULL
- #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
- DEFINE_PER_CPU(int, __kmap_atomic_idx);
- #endif
- +#endif
-
- /*
- * Virtual_count is not a pure "count".
- @@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
- unsigned long totalhigh_pages __read_mostly;
- EXPORT_SYMBOL(totalhigh_pages);
-
- -
- +#ifndef CONFIG_PREEMPT_RT_FULL
- EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
- +#endif
-
- unsigned int nr_free_highpages (void)
- {
- diff --git a/mm/memcontrol.c b/mm/memcontrol.c
- index 2a800c4a39bd..c04403033aec 100644
- --- a/mm/memcontrol.c
- +++ b/mm/memcontrol.c
- @@ -67,6 +67,7 @@
- #include <net/sock.h>
- #include <net/ip.h>
- #include "slab.h"
- +#include <linux/locallock.h>
-
- #include <asm/uaccess.h>
-
- @@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
- #define do_swap_account 0
- #endif
-
- +static DEFINE_LOCAL_IRQ_LOCK(event_lock);
- +
- /* Whether legacy memory+swap accounting is active */
- static bool do_memsw_account(void)
- {
- @@ -1795,7 +1798,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
- return;
- /* Notify other cpus that system-wide "drain" is running */
- get_online_cpus();
- - curcpu = get_cpu();
- + curcpu = get_cpu_light();
- for_each_online_cpu(cpu) {
- struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
- struct mem_cgroup *memcg;
- @@ -1812,7 +1815,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
- schedule_work_on(cpu, &stock->work);
- }
- }
- - put_cpu();
- + put_cpu_light();
- put_online_cpus();
- mutex_unlock(&percpu_charge_mutex);
- }
- @@ -4558,12 +4561,12 @@ static int mem_cgroup_move_account(struct page *page,
-
- ret = 0;
-
- - local_irq_disable();
- + local_lock_irq(event_lock);
- mem_cgroup_charge_statistics(to, page, compound, nr_pages);
- memcg_check_events(to, page);
- mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
- memcg_check_events(from, page);
- - local_irq_enable();
- + local_unlock_irq(event_lock);
- out_unlock:
- unlock_page(page);
- out:
- @@ -5438,10 +5441,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-
- commit_charge(page, memcg, lrucare);
-
- - local_irq_disable();
- + local_lock_irq(event_lock);
- mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
- memcg_check_events(memcg, page);
- - local_irq_enable();
- + local_unlock_irq(event_lock);
-
- if (do_memsw_account() && PageSwapCache(page)) {
- swp_entry_t entry = { .val = page_private(page) };
- @@ -5497,14 +5500,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
- memcg_oom_recover(memcg);
- }
-
- - local_irq_save(flags);
- + local_lock_irqsave(event_lock, flags);
- __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
- __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
- __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
- __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
- __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
- memcg_check_events(memcg, dummy_page);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(event_lock, flags);
-
- if (!mem_cgroup_is_root(memcg))
- css_put_many(&memcg->css, nr_pages);
- @@ -5659,10 +5662,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
-
- commit_charge(newpage, memcg, false);
-
- - local_irq_save(flags);
- + local_lock_irqsave(event_lock, flags);
- mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
- memcg_check_events(memcg, newpage);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(event_lock, flags);
- }
-
- DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
- @@ -5853,6 +5856,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
- {
- struct mem_cgroup *memcg, *swap_memcg;
- unsigned short oldid;
- + unsigned long flags;
-
- VM_BUG_ON_PAGE(PageLRU(page), page);
- VM_BUG_ON_PAGE(page_count(page), page);
- @@ -5893,12 +5897,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
- * important here to have the interrupts disabled because it is the
- * only synchronisation we have for updating the per-CPU variables.
- */
- + local_lock_irqsave(event_lock, flags);
- +#ifndef CONFIG_PREEMPT_RT_BASE
- VM_BUG_ON(!irqs_disabled());
- +#endif
- mem_cgroup_charge_statistics(memcg, page, false, -1);
- memcg_check_events(memcg, page);
-
- if (!mem_cgroup_is_root(memcg))
- css_put(&memcg->css);
- + local_unlock_irqrestore(event_lock, flags);
- }
-
- /*
- diff --git a/mm/mmu_context.c b/mm/mmu_context.c
- index 6f4d27c5bb32..5cd25c745a8f 100644
- --- a/mm/mmu_context.c
- +++ b/mm/mmu_context.c
- @@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- + preempt_disable_rt();
- active_mm = tsk->active_mm;
- if (active_mm != mm) {
- atomic_inc(&mm->mm_count);
- @@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
- }
- tsk->mm = mm;
- switch_mm(active_mm, mm, tsk);
- + preempt_enable_rt();
- task_unlock(tsk);
- #ifdef finish_arch_post_lock_switch
- finish_arch_post_lock_switch();
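
On non-RT kernels task_lock() is a spinning lock and already disables preemption across switch_mm(); on RT it sleeps, so the explicit preempt_disable_rt()/preempt_enable_rt() pair restores that guarantee only where RT needs it. Their definitions, as recalled from this series (an assumption, not a quote):

/* assumed -rt definitions of the preempt_*_rt helpers */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_rt()   preempt_disable()
# define preempt_enable_rt()    preempt_enable()
#else
# define preempt_disable_rt()   barrier()
# define preempt_enable_rt()    barrier()
#endif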
- diff --git a/mm/page_alloc.c b/mm/page_alloc.c
- index fbc38888252b..1cb08e1406ea 100644
- --- a/mm/page_alloc.c
- +++ b/mm/page_alloc.c
- @@ -61,6 +61,7 @@
- #include <linux/page_ext.h>
- #include <linux/hugetlb.h>
- #include <linux/sched/rt.h>
- +#include <linux/locallock.h>
- #include <linux/page_owner.h>
- #include <linux/kthread.h>
- #include <linux/memcontrol.h>
- @@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
- EXPORT_SYMBOL(nr_online_nodes);
- #endif
-
- +static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
- +
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +# define cpu_lock_irqsave(cpu, flags) \
- + local_lock_irqsave_on(pa_lock, flags, cpu)
- +# define cpu_unlock_irqrestore(cpu, flags) \
- + local_unlock_irqrestore_on(pa_lock, flags, cpu)
- +#else
- +# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
- +# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
- +#endif
- +
- int page_group_by_mobility_disabled __read_mostly;
-
- #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
- @@ -1092,7 +1105,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
- #endif /* CONFIG_DEBUG_VM */
-
- /*
- - * Frees a number of pages from the PCP lists
- + * Frees a number of pages which have been collected from the pcp lists.
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free.
- *
- @@ -1103,19 +1116,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
- * pinned" detection logic.
- */
- static void free_pcppages_bulk(struct zone *zone, int count,
- - struct per_cpu_pages *pcp)
- + struct list_head *list)
- {
- - int migratetype = 0;
- - int batch_free = 0;
- unsigned long nr_scanned;
- bool isolated_pageblocks;
- + unsigned long flags;
- +
- + spin_lock_irqsave(&zone->lock, flags);
-
- - spin_lock(&zone->lock);
- isolated_pageblocks = has_isolate_pageblock(zone);
- nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
- if (nr_scanned)
- __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
- + while (!list_empty(list)) {
- + struct page *page;
- + int mt; /* migratetype of the to-be-freed page */
- +
- + page = list_first_entry(list, struct page, lru);
- + /* must delete as __free_one_page list manipulates */
- + list_del(&page->lru);
- +
- + mt = get_pcppage_migratetype(page);
- + /* MIGRATE_ISOLATE page should not go to pcplists */
- + VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
- + /* Pageblock could have been isolated meanwhile */
- + if (unlikely(isolated_pageblocks))
- + mt = get_pageblock_migratetype(page);
- +
- + if (bulkfree_pcp_prepare(page))
- + continue;
- +
- + __free_one_page(page, page_to_pfn(page), zone, 0, mt);
- + trace_mm_page_pcpu_drain(page, 0, mt);
- + count--;
- + }
- + WARN_ON(count != 0);
- + spin_unlock_irqrestore(&zone->lock, flags);
- +}
- +
- +/*
- + * Moves a number of pages from the PCP lists to a free list, which
- + * is then freed outside of the locked region.
- + *
- + * Assumes all pages on list are in same zone, and of same order.
- + * count is the number of pages to free.
- + */
- +static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
- + struct list_head *dst)
- +{
- + int migratetype = 0;
- + int batch_free = 0;
- +
- while (count) {
- struct page *page;
- struct list_head *list;
- @@ -1131,7 +1183,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
- batch_free++;
- if (++migratetype == MIGRATE_PCPTYPES)
- migratetype = 0;
- - list = &pcp->lists[migratetype];
- + list = &src->lists[migratetype];
- } while (list_empty(list));
-
- /* This is the only non-empty list. Free them all. */
- @@ -1139,27 +1191,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
- batch_free = count;
-
- do {
- - int mt; /* migratetype of the to-be-freed page */
- -
- page = list_last_entry(list, struct page, lru);
- - /* must delete as __free_one_page list manipulates */
- list_del(&page->lru);
-
- - mt = get_pcppage_migratetype(page);
- - /* MIGRATE_ISOLATE page should not go to pcplists */
- - VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
- - /* Pageblock could have been isolated meanwhile */
- - if (unlikely(isolated_pageblocks))
- - mt = get_pageblock_migratetype(page);
- -
- - if (bulkfree_pcp_prepare(page))
- - continue;
- -
- - __free_one_page(page, page_to_pfn(page), zone, 0, mt);
- - trace_mm_page_pcpu_drain(page, 0, mt);
- + list_add(&page->lru, dst);
- } while (--count && --batch_free && !list_empty(list));
- }
- - spin_unlock(&zone->lock);
- }
-
- static void free_one_page(struct zone *zone,
- @@ -1168,7 +1205,9 @@ static void free_one_page(struct zone *zone,
- int migratetype)
- {
- unsigned long nr_scanned;
- - spin_lock(&zone->lock);
- + unsigned long flags;
- +
- + spin_lock_irqsave(&zone->lock, flags);
- nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
- if (nr_scanned)
- __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
- @@ -1178,7 +1217,7 @@ static void free_one_page(struct zone *zone,
- migratetype = get_pfnblock_migratetype(page, pfn);
- }
- __free_one_page(page, pfn, zone, order, migratetype);
- - spin_unlock(&zone->lock);
- + spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- static void __meminit __init_single_page(struct page *page, unsigned long pfn,
- @@ -1264,10 +1303,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
- return;
-
- migratetype = get_pfnblock_migratetype(page, pfn);
- - local_irq_save(flags);
- + local_lock_irqsave(pa_lock, flags);
- __count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, pfn, order, migratetype);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pa_lock, flags);
- }
-
- static void __init __free_pages_boot_core(struct page *page, unsigned int order)
- @@ -2282,16 +2321,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
- void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
- {
- unsigned long flags;
- + LIST_HEAD(dst);
- int to_drain, batch;
-
- - local_irq_save(flags);
- + local_lock_irqsave(pa_lock, flags);
- batch = READ_ONCE(pcp->batch);
- to_drain = min(pcp->count, batch);
- if (to_drain > 0) {
- - free_pcppages_bulk(zone, to_drain, pcp);
- + isolate_pcp_pages(to_drain, pcp, &dst);
- pcp->count -= to_drain;
- }
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pa_lock, flags);
- + free_pcppages_bulk(zone, to_drain, &dst);
- }
- #endif
-
- @@ -2307,16 +2348,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
- unsigned long flags;
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
- + LIST_HEAD(dst);
- + int count;
-
- - local_irq_save(flags);
- + cpu_lock_irqsave(cpu, flags);
- pset = per_cpu_ptr(zone->pageset, cpu);
-
- pcp = &pset->pcp;
- - if (pcp->count) {
- - free_pcppages_bulk(zone, pcp->count, pcp);
- + count = pcp->count;
- + if (count) {
- + isolate_pcp_pages(count, pcp, &dst);
- pcp->count = 0;
- }
- - local_irq_restore(flags);
- + cpu_unlock_irqrestore(cpu, flags);
- + if (count)
- + free_pcppages_bulk(zone, count, &dst);
- }
-
- /*
- @@ -2402,8 +2448,17 @@ void drain_all_pages(struct zone *zone)
- else
- cpumask_clear_cpu(cpu, &cpus_with_pcps);
- }
- +#ifndef CONFIG_PREEMPT_RT_BASE
- on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
- zone, 1);
- +#else
- + for_each_cpu(cpu, &cpus_with_pcps) {
- + if (zone)
- + drain_pages_zone(cpu, zone);
- + else
- + drain_pages(cpu);
- + }
- +#endif
- }
-
- #ifdef CONFIG_HIBERNATION
- @@ -2463,7 +2518,7 @@ void free_hot_cold_page(struct page *page, bool cold)
-
- migratetype = get_pfnblock_migratetype(page, pfn);
- set_pcppage_migratetype(page, migratetype);
- - local_irq_save(flags);
- + local_lock_irqsave(pa_lock, flags);
- __count_vm_event(PGFREE);
-
- /*
- @@ -2489,12 +2544,17 @@ void free_hot_cold_page(struct page *page, bool cold)
- pcp->count++;
- if (pcp->count >= pcp->high) {
- unsigned long batch = READ_ONCE(pcp->batch);
- - free_pcppages_bulk(zone, batch, pcp);
- + LIST_HEAD(dst);
- +
- + isolate_pcp_pages(batch, pcp, &dst);
- pcp->count -= batch;
- + local_unlock_irqrestore(pa_lock, flags);
- + free_pcppages_bulk(zone, batch, &dst);
- + return;
- }
-
- out:
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pa_lock, flags);
- }
-
- /*
- @@ -2629,7 +2689,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
- struct per_cpu_pages *pcp;
- struct list_head *list;
-
- - local_irq_save(flags);
- + local_lock_irqsave(pa_lock, flags);
- do {
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- @@ -2656,7 +2716,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
- * allocate greater than order-1 page units with __GFP_NOFAIL.
- */
- WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
- - spin_lock_irqsave(&zone->lock, flags);
- + local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
-
- do {
- page = NULL;
- @@ -2668,22 +2728,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
- if (!page)
- page = __rmqueue(zone, order, migratetype);
- } while (page && check_new_pages(page, order));
- - spin_unlock(&zone->lock);
- - if (!page)
- + if (!page) {
- + spin_unlock(&zone->lock);
- goto failed;
- + }
- __mod_zone_freepage_state(zone, -(1 << order),
- get_pcppage_migratetype(page));
- + spin_unlock(&zone->lock);
- }
-
- __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pa_lock, flags);
-
- VM_BUG_ON_PAGE(bad_range(zone, page), page);
- return page;
-
- failed:
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pa_lock, flags);
- return NULL;
- }
-
- @@ -6561,7 +6623,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
- int cpu = (unsigned long)hcpu;
-
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- + local_lock_irq_on(swapvec_lock, cpu);
- lru_add_drain_cpu(cpu);
- + local_unlock_irq_on(swapvec_lock, cpu);
- drain_pages(cpu);
-
- /*
- @@ -6587,6 +6651,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
- void __init page_alloc_init(void)
- {
- hotcpu_notifier(page_alloc_cpu_notify, 0);
- + local_irq_lock_init(pa_lock);
- }
-
- /*
- @@ -7422,7 +7487,7 @@ void zone_pcp_reset(struct zone *zone)
- struct per_cpu_pageset *pset;
-
- /* avoid races with drain_pages() */
- - local_irq_save(flags);
- + local_lock_irqsave(pa_lock, flags);
- if (zone->pageset != &boot_pageset) {
- for_each_online_cpu(cpu) {
- pset = per_cpu_ptr(zone->pageset, cpu);
- @@ -7431,7 +7496,7 @@ void zone_pcp_reset(struct zone *zone)
- free_percpu(zone->pageset);
- zone->pageset = &boot_pageset;
- }
- - local_irq_restore(flags);
- + local_unlock_irqrestore(pa_lock, flags);
- }
-
- #ifdef CONFIG_MEMORY_HOTREMOVE
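Note: the page_alloc.c hunks above follow one pattern throughout: free_pcppages_bulk() now only walks a list that the new isolate_pcp_pages() already detached from the per-CPU lists, so the detach step happens under the (local-locked, irq-off) pa_lock while the per-page freeing runs afterwards under zone->lock alone. A minimal userspace sketch of that shape, with a pthread mutex standing in for the kernel locks and all names illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item { struct item *next; int val; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *pending;        /* models the per-CPU pcp lists */

    static void drain(void)
    {
        struct item *batch;

        pthread_mutex_lock(&list_lock);     /* short: detach only */
        batch = pending;
        pending = NULL;
        pthread_mutex_unlock(&list_lock);

        while (batch) {                     /* long: no lock held here */
            struct item *it = batch;

            batch = it->next;
            printf("freeing %d\n", it->val);
            free(it);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct item *it = malloc(sizeof(*it));

            it->val = i;
            it->next = pending;
            pending = it;               /* single-threaded setup */
        }
        drain();
        return 0;
    }

On RT this matters because the irq-off window is then bounded by the batch size rather than by the cost of freeing each page.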
- diff --git a/mm/percpu.c b/mm/percpu.c
- index f014cebbf405..4e739fcf91bf 100644
- --- a/mm/percpu.c
- +++ b/mm/percpu.c
- @@ -1283,18 +1283,7 @@ void free_percpu(void __percpu *ptr)
- }
- EXPORT_SYMBOL_GPL(free_percpu);
-
- -/**
- - * is_kernel_percpu_address - test whether address is from static percpu area
- - * @addr: address to test
- - *
- - * Test whether @addr belongs to in-kernel static percpu area. Module
- - * static percpu areas are not considered. For those, use
- - * is_module_percpu_address().
- - *
- - * RETURNS:
- - * %true if @addr is from in-kernel static percpu area, %false otherwise.
- - */
- -bool is_kernel_percpu_address(unsigned long addr)
- +bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
- {
- #ifdef CONFIG_SMP
- const size_t static_size = __per_cpu_end - __per_cpu_start;
- @@ -1303,15 +1292,38 @@ bool is_kernel_percpu_address(unsigned long addr)
-
- for_each_possible_cpu(cpu) {
- void *start = per_cpu_ptr(base, cpu);
- + void *va = (void *)addr;
-
- - if ((void *)addr >= start && (void *)addr < start + static_size)
- + if (va >= start && va < start + static_size) {
- + if (can_addr) {
- + *can_addr = (unsigned long) (va - start);
- + *can_addr += (unsigned long)
- + per_cpu_ptr(base, get_boot_cpu_id());
- + }
- return true;
- - }
- + }
- + }
- #endif
- /* on UP, can't distinguish from other static vars, always false */
- return false;
- }
-
- +/**
- + * is_kernel_percpu_address - test whether address is from static percpu area
- + * @addr: address to test
- + *
- + * Test whether @addr belongs to in-kernel static percpu area. Module
- + * static percpu areas are not considered. For those, use
- + * is_module_percpu_address().
- + *
- + * RETURNS:
- + * %true if @addr is from in-kernel static percpu area, %false otherwise.
- + */
- +bool is_kernel_percpu_address(unsigned long addr)
- +{
- + return __is_kernel_percpu_address(addr, NULL);
- +}
- +
- /**
- * per_cpu_ptr_to_phys - convert translated percpu address to physical address
- * @addr: the address to be converted to physical address
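Note: the __is_kernel_percpu_address() change above additionally reports a canonical address: the offset of @addr within its CPU's copy of the static per-CPU area, rebased onto the boot CPU's copy, giving lock debugging one stable address per per-CPU lock. A small sketch of the same arithmetic, with plain arrays standing in for the real per-CPU areas (names illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define NCPUS 4
    #define AREA  64
    static char pcpu[NCPUS][AREA];      /* one "static area" per CPU */

    /* Rebase an address inside any CPU's copy onto CPU 0's copy. */
    static int canonicalize(uintptr_t addr, uintptr_t *can)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            uintptr_t start = (uintptr_t)pcpu[cpu];

            if (addr >= start && addr < start + AREA) {
                *can = (addr - start) + (uintptr_t)pcpu[0];
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        uintptr_t can;

        if (canonicalize((uintptr_t)&pcpu[3][10], &can))
            printf("canonical copy of the address: %p\n", (void *)can);
        return 0;
    }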
- diff --git a/mm/slab.h b/mm/slab.h
- index ceb7d70cdb76..dfd281e43fbe 100644
- --- a/mm/slab.h
- +++ b/mm/slab.h
- @@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- * The slab lists for all objects.
- */
- struct kmem_cache_node {
- +#ifdef CONFIG_SLUB
- + raw_spinlock_t list_lock;
- +#else
- spinlock_t list_lock;
- +#endif
-
- #ifdef CONFIG_SLAB
- struct list_head slabs_partial; /* partial list first, better asm code */
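Note: SLUB's list_lock becomes a raw_spinlock_t here because on RT a plain spinlock_t turns into a sleeping lock, and list_lock is taken in contexts that must not sleep. The compile-time selection of a lock flavour can be sketched in userspace like this, with a pthread spinlock standing in for raw_spinlock_t and a mutex for the RT-sleeping spinlock_t (all names assumed):

    #include <pthread.h>

    #ifdef USE_RAW_LOCK                 /* stands in for CONFIG_SLUB */
    typedef pthread_spinlock_t node_lock_t;
    #define node_lock_init(l) pthread_spin_init(l, 0)
    #define node_lock(l)      pthread_spin_lock(l)
    #define node_unlock(l)    pthread_spin_unlock(l)
    #else                               /* sleeping lock, like spinlock_t on RT */
    typedef pthread_mutex_t node_lock_t;
    #define node_lock_init(l) pthread_mutex_init(l, NULL)
    #define node_lock(l)      pthread_mutex_lock(l)
    #define node_unlock(l)    pthread_mutex_unlock(l)
    #endif

    struct cache_node {
        node_lock_t list_lock;          /* guards the slab lists */
    };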
- diff --git a/mm/slub.c b/mm/slub.c
- index edc79ca3c6d5..67eb368b9314 100644
- --- a/mm/slub.c
- +++ b/mm/slub.c
- @@ -1144,7 +1144,7 @@ static noinline int free_debug_processing(
- unsigned long uninitialized_var(flags);
- int ret = 0;
-
- - spin_lock_irqsave(&n->list_lock, flags);
- + raw_spin_lock_irqsave(&n->list_lock, flags);
- slab_lock(page);
-
- if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- @@ -1179,7 +1179,7 @@ static noinline int free_debug_processing(
- bulk_cnt, cnt);
-
- slab_unlock(page);
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- if (!ret)
- slab_fix(s, "Object at 0x%p not freed", object);
- return ret;
- @@ -1307,6 +1307,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
-
- #endif /* CONFIG_SLUB_DEBUG */
-
- +struct slub_free_list {
- + raw_spinlock_t lock;
- + struct list_head list;
- +};
- +static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
- +
- /*
- * Hooks for other subsystems that check memory allocations. In a typical
- * production configuration these hooks all should produce no code at all.
- @@ -1530,10 +1536,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
- void *start, *p;
- int idx, order;
- bool shuffle;
- + bool enableirqs = false;
-
- flags &= gfp_allowed_mask;
-
- if (gfpflags_allow_blocking(flags))
- + enableirqs = true;
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (system_state == SYSTEM_RUNNING)
- + enableirqs = true;
- +#endif
- + if (enableirqs)
- local_irq_enable();
-
- flags |= s->allocflags;
- @@ -1608,7 +1621,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
- page->frozen = 1;
-
- out:
- - if (gfpflags_allow_blocking(flags))
- + if (enableirqs)
- local_irq_disable();
- if (!page)
- return NULL;
- @@ -1667,6 +1680,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
- __free_pages(page, order);
- }
-
- +static void free_delayed(struct list_head *h)
- +{
- + while (!list_empty(h)) {
- + struct page *page = list_first_entry(h, struct page, lru);
- +
- + list_del(&page->lru);
- + __free_slab(page->slab_cache, page);
- + }
- +}
- +
- #define need_reserve_slab_rcu \
- (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-
- @@ -1698,6 +1721,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
- }
-
- call_rcu(head, rcu_free_slab);
- + } else if (irqs_disabled()) {
- + struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
- +
- + raw_spin_lock(&f->lock);
- + list_add(&page->lru, &f->list);
- + raw_spin_unlock(&f->lock);
- } else
- __free_slab(s, page);
- }
- @@ -1805,7 +1834,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- if (!n || !n->nr_partial)
- return NULL;
-
- - spin_lock(&n->list_lock);
- + raw_spin_lock(&n->list_lock);
- list_for_each_entry_safe(page, page2, &n->partial, lru) {
- void *t;
-
- @@ -1830,7 +1859,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- break;
-
- }
- - spin_unlock(&n->list_lock);
- + raw_spin_unlock(&n->list_lock);
- return object;
- }
-
- @@ -2076,7 +2105,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
- * that acquire_slab() will see a slab page that
- * is frozen
- */
- - spin_lock(&n->list_lock);
- + raw_spin_lock(&n->list_lock);
- }
- } else {
- m = M_FULL;
- @@ -2087,7 +2116,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
- * slabs from diagnostic functions will not see
- * any frozen slabs.
- */
- - spin_lock(&n->list_lock);
- + raw_spin_lock(&n->list_lock);
- }
- }
-
- @@ -2122,7 +2151,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
- goto redo;
-
- if (lock)
- - spin_unlock(&n->list_lock);
- + raw_spin_unlock(&n->list_lock);
-
- if (m == M_FREE) {
- stat(s, DEACTIVATE_EMPTY);
- @@ -2154,10 +2183,10 @@ static void unfreeze_partials(struct kmem_cache *s,
- n2 = get_node(s, page_to_nid(page));
- if (n != n2) {
- if (n)
- - spin_unlock(&n->list_lock);
- + raw_spin_unlock(&n->list_lock);
-
- n = n2;
- - spin_lock(&n->list_lock);
- + raw_spin_lock(&n->list_lock);
- }
-
- do {
- @@ -2186,7 +2215,7 @@ static void unfreeze_partials(struct kmem_cache *s,
- }
-
- if (n)
- - spin_unlock(&n->list_lock);
- + raw_spin_unlock(&n->list_lock);
-
- while (discard_page) {
- page = discard_page;
- @@ -2225,14 +2254,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
- pobjects = oldpage->pobjects;
- pages = oldpage->pages;
- if (drain && pobjects > s->cpu_partial) {
- + struct slub_free_list *f;
- unsigned long flags;
- + LIST_HEAD(tofree);
- /*
- * partial array is full. Move the existing
- * set to the per node partial list.
- */
- local_irq_save(flags);
- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
- + f = this_cpu_ptr(&slub_free_list);
- + raw_spin_lock(&f->lock);
- + list_splice_init(&f->list, &tofree);
- + raw_spin_unlock(&f->lock);
- local_irq_restore(flags);
- + free_delayed(&tofree);
- oldpage = NULL;
- pobjects = 0;
- pages = 0;
- @@ -2304,7 +2340,22 @@ static bool has_cpu_slab(int cpu, void *info)
-
- static void flush_all(struct kmem_cache *s)
- {
- + LIST_HEAD(tofree);
- + int cpu;
- +
- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
- + for_each_online_cpu(cpu) {
- + struct slub_free_list *f;
- +
- + if (!has_cpu_slab(cpu, s))
- + continue;
- +
- + f = &per_cpu(slub_free_list, cpu);
- + raw_spin_lock_irq(&f->lock);
- + list_splice_init(&f->list, &tofree);
- + raw_spin_unlock_irq(&f->lock);
- + free_delayed(&tofree);
- + }
- }
-
- /*
- @@ -2359,10 +2410,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
- unsigned long x = 0;
- struct page *page;
-
- - spin_lock_irqsave(&n->list_lock, flags);
- + raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- x += get_count(page);
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- return x;
- }
- #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
- @@ -2500,8 +2551,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
- * already disabled (which is the case for bulk allocation).
- */
- static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- - unsigned long addr, struct kmem_cache_cpu *c)
- + unsigned long addr, struct kmem_cache_cpu *c,
- + struct list_head *to_free)
- {
- + struct slub_free_list *f;
- void *freelist;
- struct page *page;
-
- @@ -2561,6 +2614,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- VM_BUG_ON(!c->page->frozen);
- c->freelist = get_freepointer(s, freelist);
- c->tid = next_tid(c->tid);
- +
- +out:
- + f = this_cpu_ptr(&slub_free_list);
- + raw_spin_lock(&f->lock);
- + list_splice_init(&f->list, to_free);
- + raw_spin_unlock(&f->lock);
- +
- return freelist;
-
- new_slab:
- @@ -2592,7 +2652,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- deactivate_slab(s, page, get_freepointer(s, freelist));
- c->page = NULL;
- c->freelist = NULL;
- - return freelist;
- + goto out;
- }
-
- /*
- @@ -2604,6 +2664,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- {
- void *p;
- unsigned long flags;
- + LIST_HEAD(tofree);
-
- local_irq_save(flags);
- #ifdef CONFIG_PREEMPT
- @@ -2615,8 +2676,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- c = this_cpu_ptr(s->cpu_slab);
- #endif
-
- - p = ___slab_alloc(s, gfpflags, node, addr, c);
- + p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
- local_irq_restore(flags);
- + free_delayed(&tofree);
- return p;
- }
-
- @@ -2802,7 +2864,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
-
- do {
- if (unlikely(n)) {
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- n = NULL;
- }
- prior = page->freelist;
- @@ -2834,7 +2896,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- * Otherwise the list_lock will synchronize with
- * other processors updating the list of slabs.
- */
- - spin_lock_irqsave(&n->list_lock, flags);
- + raw_spin_lock_irqsave(&n->list_lock, flags);
-
- }
- }
- @@ -2876,7 +2938,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- add_partial(n, page, DEACTIVATE_TO_TAIL);
- stat(s, FREE_ADD_PARTIAL);
- }
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- return;
-
- slab_empty:
- @@ -2891,7 +2953,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
- remove_full(s, n, page);
- }
-
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- stat(s, FREE_SLAB);
- discard_slab(s, page);
- }
- @@ -3096,6 +3158,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- void **p)
- {
- struct kmem_cache_cpu *c;
- + LIST_HEAD(to_free);
- int i;
-
- /* memcg and kmem_cache debug support */
- @@ -3119,7 +3182,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- * of re-populating per CPU c->freelist
- */
- p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
- - _RET_IP_, c);
- + _RET_IP_, c, &to_free);
- if (unlikely(!p[i]))
- goto error;
-
- @@ -3131,6 +3194,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- }
- c->tid = next_tid(c->tid);
- local_irq_enable();
- + free_delayed(&to_free);
-
- /* Clear memory outside IRQ disabled fastpath loop */
- if (unlikely(flags & __GFP_ZERO)) {
- @@ -3278,7 +3342,7 @@ static void
- init_kmem_cache_node(struct kmem_cache_node *n)
- {
- n->nr_partial = 0;
- - spin_lock_init(&n->list_lock);
- + raw_spin_lock_init(&n->list_lock);
- INIT_LIST_HEAD(&n->partial);
- #ifdef CONFIG_SLUB_DEBUG
- atomic_long_set(&n->nr_slabs, 0);
- @@ -3622,6 +3686,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
- const char *text)
- {
- #ifdef CONFIG_SLUB_DEBUG
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + /* XXX move out of irq-off section */
- + slab_err(s, page, text, s->name);
- +#else
- void *addr = page_address(page);
- void *p;
- unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
- @@ -3642,6 +3710,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
- slab_unlock(page);
- kfree(map);
- #endif
- +#endif
- }
-
- /*
- @@ -3655,7 +3724,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
- struct page *page, *h;
-
- BUG_ON(irqs_disabled());
- - spin_lock_irq(&n->list_lock);
- + raw_spin_lock_irq(&n->list_lock);
- list_for_each_entry_safe(page, h, &n->partial, lru) {
- if (!page->inuse) {
- remove_partial(n, page);
- @@ -3665,7 +3734,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
- "Objects remaining in %s on __kmem_cache_shutdown()");
- }
- }
- - spin_unlock_irq(&n->list_lock);
- + raw_spin_unlock_irq(&n->list_lock);
-
- list_for_each_entry_safe(page, h, &discard, lru)
- discard_slab(s, page);
- @@ -3908,7 +3977,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
- for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
- INIT_LIST_HEAD(promote + i);
-
- - spin_lock_irqsave(&n->list_lock, flags);
- + raw_spin_lock_irqsave(&n->list_lock, flags);
-
- /*
- * Build lists of slabs to discard or promote.
- @@ -3939,7 +4008,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
- for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
- list_splice(promote + i, &n->partial);
-
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
-
- /* Release empty slabs */
- list_for_each_entry_safe(page, t, &discard, lru)
- @@ -4115,6 +4184,12 @@ void __init kmem_cache_init(void)
- {
- static __initdata struct kmem_cache boot_kmem_cache,
- boot_kmem_cache_node;
- + int cpu;
- +
- + for_each_possible_cpu(cpu) {
- + raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
- + INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
- + }
-
- if (debug_guardpage_minorder())
- slub_max_order = 0;
- @@ -4323,7 +4398,7 @@ static int validate_slab_node(struct kmem_cache *s,
- struct page *page;
- unsigned long flags;
-
- - spin_lock_irqsave(&n->list_lock, flags);
- + raw_spin_lock_irqsave(&n->list_lock, flags);
-
- list_for_each_entry(page, &n->partial, lru) {
- validate_slab_slab(s, page, map);
- @@ -4345,7 +4420,7 @@ static int validate_slab_node(struct kmem_cache *s,
- s->name, count, atomic_long_read(&n->nr_slabs));
-
- out:
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- return count;
- }
-
- @@ -4533,12 +4608,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
- if (!atomic_long_read(&n->nr_slabs))
- continue;
-
- - spin_lock_irqsave(&n->list_lock, flags);
- + raw_spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- process_slab(&t, s, page, alloc, map);
- list_for_each_entry(page, &n->full, lru)
- process_slab(&t, s, page, alloc, map);
- - spin_unlock_irqrestore(&n->list_lock, flags);
- + raw_spin_unlock_irqrestore(&n->list_lock, flags);
- }
-
- for (i = 0; i < t.count; i++) {
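Note: the slub_free_list machinery added above defers __free_slab() when it would run with interrupts disabled: the page is parked on a per-CPU list under a raw lock, and every safe exit path (___slab_alloc(), flush_all(), put_cpu_partial(), kmem_cache_alloc_bulk()) splices that list and calls free_delayed(). A userspace sketch of the two halves, offered as a model only; like the kernel (which chains pages through page->lru), it links through the object itself so the deferring side never allocates:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj { struct obj *next; /* payload ... */ };

    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *deferred;        /* models the per-CPU slub_free_list */

    /* Cheap and callable from a restricted context: just chain the object. */
    static void free_later(struct obj *o)
    {
        pthread_mutex_lock(&free_lock);
        o->next = deferred;
        deferred = o;
        pthread_mutex_unlock(&free_lock);
    }

    /* Called later, from a context where freeing is actually allowed. */
    static void free_delayed(void)
    {
        struct obj *o;

        pthread_mutex_lock(&free_lock);
        o = deferred;
        deferred = NULL;
        pthread_mutex_unlock(&free_lock);

        while (o) {
            struct obj *next = o->next;

            free(o);
            o = next;
        }
    }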
- diff --git a/mm/swap.c b/mm/swap.c
- index 4dcf852e1e6d..69c3a5b24060 100644
- --- a/mm/swap.c
- +++ b/mm/swap.c
- @@ -32,6 +32,7 @@
- #include <linux/memcontrol.h>
- #include <linux/gfp.h>
- #include <linux/uio.h>
- +#include <linux/locallock.h>
- #include <linux/hugetlb.h>
- #include <linux/page_idle.h>
-
- @@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
- #ifdef CONFIG_SMP
- static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
- #endif
- +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
- +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
-
- /*
- * This path almost never happens for VM activity - pages are normally
- @@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page *page)
- unsigned long flags;
-
- get_page(page);
- - local_irq_save(flags);
- + local_lock_irqsave(rotate_lock, flags);
- pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_move_tail(pvec);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(rotate_lock, flags);
- }
- }
-
- @@ -294,12 +297,13 @@ void activate_page(struct page *page)
- {
- page = compound_head(page);
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
- + struct pagevec *pvec = &get_locked_var(swapvec_lock,
- + activate_page_pvecs);
-
- get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, __activate_page, NULL);
- - put_cpu_var(activate_page_pvecs);
- + put_locked_var(swapvec_lock, activate_page_pvecs);
- }
- }
-
- @@ -326,7 +330,7 @@ void activate_page(struct page *page)
-
- static void __lru_cache_activate_page(struct page *page)
- {
- - struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
- + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
- int i;
-
- /*
- @@ -348,7 +352,7 @@ static void __lru_cache_activate_page(struct page *page)
- }
- }
-
- - put_cpu_var(lru_add_pvec);
- + put_locked_var(swapvec_lock, lru_add_pvec);
- }
-
- /*
- @@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
-
- static void __lru_cache_add(struct page *page)
- {
- - struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
- + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
-
- get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
- __pagevec_lru_add(pvec);
- - put_cpu_var(lru_add_pvec);
- + put_locked_var(swapvec_lock, lru_add_pvec);
- }
-
- /**
- @@ -593,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
- unsigned long flags;
-
- /* No harm done if a racing interrupt already did this */
- - local_irq_save(flags);
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + local_lock_irqsave_on(rotate_lock, flags, cpu);
- pagevec_move_tail(pvec);
- - local_irq_restore(flags);
- + local_unlock_irqrestore_on(rotate_lock, flags, cpu);
- +#else
- + local_lock_irqsave(rotate_lock, flags);
- + pagevec_move_tail(pvec);
- + local_unlock_irqrestore(rotate_lock, flags);
- +#endif
- }
-
- pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
- @@ -627,11 +637,12 @@ void deactivate_file_page(struct page *page)
- return;
-
- if (likely(get_page_unless_zero(page))) {
- - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
- + struct pagevec *pvec = &get_locked_var(swapvec_lock,
- + lru_deactivate_file_pvecs);
-
- if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
- - put_cpu_var(lru_deactivate_file_pvecs);
- + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
- }
- }
-
- @@ -646,27 +657,31 @@ void deactivate_file_page(struct page *page)
- void deactivate_page(struct page *page)
- {
- if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
- - struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
- + struct pagevec *pvec = &get_locked_var(swapvec_lock,
- + lru_deactivate_pvecs);
-
- get_page(page);
- if (!pagevec_add(pvec, page) || PageCompound(page))
- pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
- - put_cpu_var(lru_deactivate_pvecs);
- + put_locked_var(swapvec_lock, lru_deactivate_pvecs);
- }
- }
-
- void lru_add_drain(void)
- {
- - lru_add_drain_cpu(get_cpu());
- - put_cpu();
- + lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
- + local_unlock_cpu(swapvec_lock);
- }
-
- -static void lru_add_drain_per_cpu(struct work_struct *dummy)
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
- {
- - lru_add_drain();
- + local_lock_on(swapvec_lock, cpu);
- + lru_add_drain_cpu(cpu);
- + local_unlock_on(swapvec_lock, cpu);
- }
-
- -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
- +#else
-
- /*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- @@ -686,6 +701,22 @@ static int __init lru_init(void)
- }
- early_initcall(lru_init);
-
- +static void lru_add_drain_per_cpu(struct work_struct *dummy)
- +{
- + lru_add_drain();
- +}
- +
- +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
- +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
- +{
- + struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
- +
- + INIT_WORK(work, lru_add_drain_per_cpu);
- + queue_work_on(cpu, lru_add_drain_wq, work);
- + cpumask_set_cpu(cpu, has_work);
- +}
- +#endif
- +
- void lru_add_drain_all(void)
- {
- static DEFINE_MUTEX(lock);
- @@ -697,21 +728,18 @@ void lru_add_drain_all(void)
- cpumask_clear(&has_work);
-
- for_each_online_cpu(cpu) {
- - struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
- -
- if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
- pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
- pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
- pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
- - need_activate_page_drain(cpu)) {
- - INIT_WORK(work, lru_add_drain_per_cpu);
- - queue_work_on(cpu, lru_add_drain_wq, work);
- - cpumask_set_cpu(cpu, &has_work);
- - }
- + need_activate_page_drain(cpu))
- + remote_lru_add_drain(cpu, &has_work);
- }
-
- +#ifndef CONFIG_PREEMPT_RT_BASE
- for_each_cpu(cpu, &has_work)
- flush_work(&per_cpu(lru_add_drain_work, cpu));
- +#endif
-
- put_online_cpus();
- mutex_unlock(&lock);
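Note: in swap.c every get_cpu_var()/put_cpu_var() pair around a pagevec becomes get_locked_var()/put_locked_var() on swapvec_lock: the data is pinned by a lock that may sleep on RT instead of by disabling preemption. A one-slot userspace model of that accessor pair (names assumed; the real code keeps one pagevec and one lock per CPU):

    #include <pthread.h>

    struct pagevec { int nr; void *pages[14]; };

    static pthread_mutex_t swapvec_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pagevec lru_add_pvec; /* one slot models the per-CPU vecs */

    static struct pagevec *get_locked_pvec(void)
    {
        pthread_mutex_lock(&swapvec_lock);  /* pins the data, not the CPU */
        return &lru_add_pvec;
    }

    static void put_locked_pvec(void)
    {
        pthread_mutex_unlock(&swapvec_lock);
    }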
- diff --git a/mm/truncate.c b/mm/truncate.c
- index 9c809e7d73c3..b7681e888ba0 100644
- --- a/mm/truncate.c
- +++ b/mm/truncate.c
- @@ -62,9 +62,12 @@ static void clear_exceptional_entry(struct address_space *mapping,
- * protected by mapping->tree_lock.
- */
- if (!workingset_node_shadows(node) &&
- - !list_empty(&node->private_list))
- - list_lru_del(&workingset_shadow_nodes,
- + !list_empty(&node->private_list)) {
- + local_lock(workingset_shadow_lock);
- + list_lru_del(&__workingset_shadow_nodes,
- &node->private_list);
- + local_unlock(workingset_shadow_lock);
- + }
- __radix_tree_delete_node(&mapping->page_tree, node);
- unlock:
- spin_unlock_irq(&mapping->tree_lock);
- diff --git a/mm/vmalloc.c b/mm/vmalloc.c
- index 195de42bea1f..b46cb686fde7 100644
- --- a/mm/vmalloc.c
- +++ b/mm/vmalloc.c
- @@ -855,7 +855,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
- struct vmap_block *vb;
- struct vmap_area *va;
- unsigned long vb_idx;
- - int node, err;
- + int node, err, cpu;
- void *vaddr;
-
- node = numa_node_id();
- @@ -898,11 +898,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
- BUG_ON(err);
- radix_tree_preload_end();
-
- - vbq = &get_cpu_var(vmap_block_queue);
- + cpu = get_cpu_light();
- + vbq = this_cpu_ptr(&vmap_block_queue);
- spin_lock(&vbq->lock);
- list_add_tail_rcu(&vb->free_list, &vbq->free);
- spin_unlock(&vbq->lock);
- - put_cpu_var(vmap_block_queue);
- + put_cpu_light();
-
- return vaddr;
- }
- @@ -971,6 +972,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
- struct vmap_block *vb;
- void *vaddr = NULL;
- unsigned int order;
- + int cpu;
-
- BUG_ON(offset_in_page(size));
- BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
- @@ -985,7 +987,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
- order = get_order(size);
-
- rcu_read_lock();
- - vbq = &get_cpu_var(vmap_block_queue);
- + cpu = get_cpu_light();
- + vbq = this_cpu_ptr(&vmap_block_queue);
- list_for_each_entry_rcu(vb, &vbq->free, free_list) {
- unsigned long pages_off;
-
- @@ -1008,7 +1011,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
- break;
- }
-
- - put_cpu_var(vmap_block_queue);
- + put_cpu_light();
- rcu_read_unlock();
-
- /* Allocate new block if nothing was found */
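Note: vmalloc's vmap_block_queue already carries its own spinlock, so the caller needs a stable queue to operate on rather than disabled preemption; that is what get_cpu_light()/put_cpu_light() provide on RT. A loose sketch of the idea, where indexing by a caller id stands in for this_cpu_ptr() and any queue would be equally correct:

    #include <pthread.h>

    #define NQUEUES 4

    struct vbq {
        pthread_mutex_t lock;           /* the queue's own lock */
        /* free list of vmap blocks ... */
    };

    static struct vbq queues[NQUEUES];

    static void queues_init(void)
    {
        for (int i = 0; i < NQUEUES; i++)
            pthread_mutex_init(&queues[i].lock, NULL);
    }

    static struct vbq *get_queue(unsigned int self)
    {
        struct vbq *q = &queues[self % NQUEUES];

        pthread_mutex_lock(&q->lock);   /* q->lock does the real protection */
        return q;
    }

    static void put_queue(struct vbq *q)
    {
        pthread_mutex_unlock(&q->lock);
    }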
- diff --git a/mm/vmstat.c b/mm/vmstat.c
- index 6a088df04b29..abda95be88b4 100644
- --- a/mm/vmstat.c
- +++ b/mm/vmstat.c
- @@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
- long x;
- long t;
-
- + preempt_disable_rt();
- x = delta + __this_cpu_read(*p);
-
- t = __this_cpu_read(pcp->stat_threshold);
- @@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
- x = 0;
- }
- __this_cpu_write(*p, x);
- + preempt_enable_rt();
- }
- EXPORT_SYMBOL(__mod_zone_page_state);
-
- @@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
- long x;
- long t;
-
- + preempt_disable_rt();
- x = delta + __this_cpu_read(*p);
-
- t = __this_cpu_read(pcp->stat_threshold);
- @@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
- x = 0;
- }
- __this_cpu_write(*p, x);
- + preempt_enable_rt();
- }
- EXPORT_SYMBOL(__mod_node_page_state);
-
- @@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
-
- + preempt_disable_rt();
- v = __this_cpu_inc_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v > t)) {
- @@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
- zone_page_state_add(v + overstep, zone, item);
- __this_cpu_write(*p, -overstep);
- }
- + preempt_enable_rt();
- }
-
- void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
- @@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
-
- + preempt_disable_rt();
- v = __this_cpu_inc_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v > t)) {
- @@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
- node_page_state_add(v + overstep, pgdat, item);
- __this_cpu_write(*p, -overstep);
- }
- + preempt_enable_rt();
- }
-
- void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
- @@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
- s8 __percpu *p = pcp->vm_stat_diff + item;
- s8 v, t;
-
- + preempt_disable_rt();
- v = __this_cpu_dec_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v < - t)) {
- @@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
- zone_page_state_add(v - overstep, zone, item);
- __this_cpu_write(*p, overstep);
- }
- + preempt_enable_rt();
- }
-
- void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
- @@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
- s8 __percpu *p = pcp->vm_node_stat_diff + item;
- s8 v, t;
-
- + preempt_disable_rt();
- v = __this_cpu_dec_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
- if (unlikely(v < - t)) {
- @@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
- node_page_state_add(v - overstep, pgdat, item);
- __this_cpu_write(*p, overstep);
- }
- + preempt_enable_rt();
- }
-
- void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
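Note: each vmstat hunk wraps the same read-modify-write: load the per-CPU delta, fold it into the global counter once it crosses the threshold, and write the remainder back. Without preempt_disable_rt(), a preempting writer could run between the load and the store and its update would be lost. A sketch of the guarded sequence, with a mutex modelling the RT preemption guard:

    #include <pthread.h>

    static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;
    static long global_count;           /* models the zone counter */
    static long cpu_diff;               /* models the per-CPU s8 delta */

    static void mod_state(long delta, long threshold)
    {
        pthread_mutex_lock(&guard);     /* preempt_disable_rt() in the patch */
        long x = cpu_diff + delta;

        if (x > threshold || x < -threshold) {
            global_count += x;          /* fold the delta into the global */
            x = 0;
        }
        cpu_diff = x;
        pthread_mutex_unlock(&guard);   /* preempt_enable_rt() */
    }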
- diff --git a/mm/workingset.c b/mm/workingset.c
- index 4c4f05655e6e..b97b1e87b54c 100644
- --- a/mm/workingset.c
- +++ b/mm/workingset.c
- @@ -334,7 +334,8 @@ void workingset_activation(struct page *page)
- * point where they would still be useful.
- */
-
- -struct list_lru workingset_shadow_nodes;
- +struct list_lru __workingset_shadow_nodes;
- +DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
-
- static unsigned long count_shadow_nodes(struct shrinker *shrinker,
- struct shrink_control *sc)
- @@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
- unsigned long pages;
-
- /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
- - local_irq_disable();
- - shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
- - local_irq_enable();
- + local_lock_irq(workingset_shadow_lock);
- + shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
- + local_unlock_irq(workingset_shadow_lock);
-
- if (sc->memcg) {
- pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
- @@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
- spin_unlock(&mapping->tree_lock);
- ret = LRU_REMOVED_RETRY;
- out:
- - local_irq_enable();
- + local_unlock_irq(workingset_shadow_lock);
- cond_resched();
- - local_irq_disable();
- + local_lock_irq(workingset_shadow_lock);
- spin_lock(lru_lock);
- return ret;
- }
- @@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
- unsigned long ret;
-
- /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
- - local_irq_disable();
- - ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
- + local_lock_irq(workingset_shadow_lock);
- + ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
- shadow_lru_isolate, NULL);
- - local_irq_enable();
- + local_unlock_irq(workingset_shadow_lock);
- return ret;
- }
-
- @@ -492,7 +493,7 @@ static int __init workingset_init(void)
- pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
- timestamp_bits, max_order, bucket_order);
-
- - ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
- + ret = __list_lru_init(&__workingset_shadow_nodes, true, &shadow_nodes_key);
- if (ret)
- goto err;
- ret = register_shrinker(&workingset_shadow_shrinker);
- @@ -500,7 +501,7 @@ static int __init workingset_init(void)
- goto err_list_lru;
- return 0;
- err_list_lru:
- - list_lru_destroy(&workingset_shadow_nodes);
- + list_lru_destroy(&__workingset_shadow_nodes);
- err:
- return ret;
- }
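Note: shadow_lru_isolate() (context above) drops the shadow-nodes lock around cond_resched() and reacquires it before returning, so a long shrinker walk cannot monopolise the lock; the workingset.c hunks only change which lock that is. The drop-and-reacquire shape in miniature, with sched_yield() standing in for cond_resched():

    #include <pthread.h>
    #include <sched.h>

    static pthread_mutex_t shadow_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds shadow_lock on entry and expects it held on return. */
    static int isolate_one(void)
    {
        /* ... detach one node ... */
        pthread_mutex_unlock(&shadow_lock);
        sched_yield();                  /* cond_resched() in the kernel */
        pthread_mutex_lock(&shadow_lock);
        return 0;                       /* LRU_REMOVED_RETRY in the kernel */
    }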
- diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
- index d3548c48369f..8894f0749d8d 100644
- --- a/mm/zsmalloc.c
- +++ b/mm/zsmalloc.c
- @@ -53,6 +53,7 @@
- #include <linux/mount.h>
- #include <linux/migrate.h>
- #include <linux/pagemap.h>
- +#include <linux/locallock.h>
-
- #define ZSPAGE_MAGIC 0x58
-
- @@ -70,9 +71,22 @@
- */
- #define ZS_MAX_ZSPAGE_ORDER 2
- #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
- -
- #define ZS_HANDLE_SIZE (sizeof(unsigned long))
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +
- +struct zsmalloc_handle {
- + unsigned long addr;
- + struct mutex lock;
- +};
- +
- +#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
- +
- +#else
- +
- +#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
- +#endif
- +
- /*
- * Object location (<PFN>, <obj_idx>) is encoded as
- * as single (unsigned long) handle value.
- @@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
-
- static int create_cache(struct zs_pool *pool)
- {
- - pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
- + pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
- 0, 0, NULL);
- if (!pool->handle_cachep)
- return 1;
- @@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool *pool)
-
- static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
- {
- - return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
- - gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
- + void *p;
- +
- + p = kmem_cache_alloc(pool->handle_cachep,
- + gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + if (p) {
- + struct zsmalloc_handle *zh = p;
- +
- + mutex_init(&zh->lock);
- + }
- +#endif
- + return (unsigned long)p;
- }
-
- +#ifdef CONFIG_PREEMPT_RT_FULL
- +static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
- +{
- + return (void *)(handle & ~((1 << OBJ_TAG_BITS) - 1));
- +}
- +#endif
- +
- static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
- {
- kmem_cache_free(pool->handle_cachep, (void *)handle);
- @@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
-
- static void record_obj(unsigned long handle, unsigned long obj)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- +
- + WRITE_ONCE(zh->addr, obj);
- +#else
- /*
- * lsb of @obj represents handle lock while other bits
- * represent object value the handle is pointing so
- * updating shouldn't do store tearing.
- */
- WRITE_ONCE(*(unsigned long *)handle, obj);
- +#endif
- }
-
- /* zpool driver */
- @@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc");
-
- /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
- static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
- +static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock);
-
- static bool is_zspage_isolated(struct zspage *zspage)
- {
- @@ -902,7 +940,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
-
- static unsigned long handle_to_obj(unsigned long handle)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- +
- + return zh->addr;
- +#else
- return *(unsigned long *)handle;
- +#endif
- }
-
- static unsigned long obj_to_head(struct page *page, void *obj)
- @@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
-
- static inline int testpin_tag(unsigned long handle)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- +
- + return mutex_is_locked(&zh->lock);
- +#else
- return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
- +#endif
- }
-
- static inline int trypin_tag(unsigned long handle)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- +
- + return mutex_trylock(&zh->lock);
- +#else
- return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
- +#endif
- }
-
- static void pin_tag(unsigned long handle)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- +
- + return mutex_lock(&zh->lock);
- +#else
- bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
- +#endif
- }
-
- static void unpin_tag(unsigned long handle)
- {
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- +
- + return mutex_unlock(&zh->lock);
- +#else
- bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
- +#endif
- }
-
- static void reset_page(struct page *page)
- @@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- class = pool->size_class[class_idx];
- off = (class->size * obj_idx) & ~PAGE_MASK;
-
- - area = &get_cpu_var(zs_map_area);
- + area = &get_locked_var(zs_map_area_lock, zs_map_area);
- area->vm_mm = mm;
- if (off + class->size <= PAGE_SIZE) {
- /* this object is contained entirely within a page */
- @@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
-
- __zs_unmap_object(area, pages, off, class->size);
- }
- - put_cpu_var(zs_map_area);
- + put_locked_var(zs_map_area_lock, zs_map_area);
-
- migrate_read_unlock(zspage);
- unpin_tag(handle);
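Note: on RT the zsmalloc handle can no longer be pinned by spinning on a bit of the handle word, so ZS_HANDLE_ALLOC_SIZE grows the allocation into a struct carrying the value plus a real, sleepable lock. A userspace model of that layout and of the pin/unpin/testpin operations, with a pthread mutex in place of the kernel mutex (names assumed):

    #include <pthread.h>

    struct zs_handle {
        unsigned long addr;             /* the encoded object location */
        pthread_mutex_t lock;           /* replaces HANDLE_PIN_BIT spinning */
    };

    static void handle_init(struct zs_handle *zh, unsigned long addr)
    {
        zh->addr = addr;
        pthread_mutex_init(&zh->lock, NULL);
    }

    static void pin(struct zs_handle *zh)   { pthread_mutex_lock(&zh->lock); }
    static void unpin(struct zs_handle *zh) { pthread_mutex_unlock(&zh->lock); }

    static int testpin(struct zs_handle *zh)
    {
        if (pthread_mutex_trylock(&zh->lock))
            return 1;                   /* currently pinned elsewhere */
        pthread_mutex_unlock(&zh->lock);
        return 0;
    }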
- diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
- index c88a6007e643..5de85b55a821 100644
- --- a/net/bluetooth/hci_sock.c
- +++ b/net/bluetooth/hci_sock.c
- @@ -251,15 +251,13 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
- }
-
- /* Send frame to sockets with specific channel */
- -void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
- - int flag, struct sock *skip_sk)
- +static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
- + int flag, struct sock *skip_sk)
- {
- struct sock *sk;
-
- BT_DBG("channel %u len %d", channel, skb->len);
-
- - read_lock(&hci_sk_list.lock);
- -
- sk_for_each(sk, &hci_sk_list.head) {
- struct sk_buff *nskb;
-
- @@ -285,6 +283,13 @@ void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
- kfree_skb(nskb);
- }
-
- +}
- +
- +void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
- + int flag, struct sock *skip_sk)
- +{
- + read_lock(&hci_sk_list.lock);
- + __hci_send_to_channel(channel, skb, flag, skip_sk);
- read_unlock(&hci_sk_list.lock);
- }
-
- @@ -388,8 +393,8 @@ void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
- hdr->index = index;
- hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
-
- - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
- - HCI_SOCK_TRUSTED, NULL);
- + __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
- + HCI_SOCK_TRUSTED, NULL);
- kfree_skb(skb);
- }
-
- diff --git a/net/core/dev.c b/net/core/dev.c
- index 09007a71c8dd..6cb279747408 100644
- --- a/net/core/dev.c
- +++ b/net/core/dev.c
- @@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
- static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
-
- static seqcount_t devnet_rename_seq;
- +static DEFINE_MUTEX(devnet_rename_mutex);
-
- static inline void dev_base_seq_inc(struct net *net)
- {
- @@ -211,14 +212,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
- static inline void rps_lock(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
- - spin_lock(&sd->input_pkt_queue.lock);
- + raw_spin_lock(&sd->input_pkt_queue.raw_lock);
- #endif
- }
-
- static inline void rps_unlock(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
- - spin_unlock(&sd->input_pkt_queue.lock);
- + raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
- #endif
- }
-
- @@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, char *name, int ifindex)
- strcpy(name, dev->name);
- rcu_read_unlock();
- if (read_seqcount_retry(&devnet_rename_seq, seq)) {
- - cond_resched();
- + mutex_lock(&devnet_rename_mutex);
- + mutex_unlock(&devnet_rename_mutex);
- goto retry;
- }
-
- @@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- - write_seqcount_begin(&devnet_rename_seq);
- + mutex_lock(&devnet_rename_mutex);
- + __raw_write_seqcount_begin(&devnet_rename_seq);
-
- - if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
- - write_seqcount_end(&devnet_rename_seq);
- - return 0;
- - }
- + if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
- + goto outunlock;
-
- memcpy(oldname, dev->name, IFNAMSIZ);
-
- err = dev_get_valid_name(net, dev, newname);
- - if (err < 0) {
- - write_seqcount_end(&devnet_rename_seq);
- - return err;
- - }
- + if (err < 0)
- + goto outunlock;
-
- if (oldname[0] && !strchr(oldname, '%'))
- netdev_info(dev, "renamed from %s\n", oldname);
- @@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *dev, const char *newname)
- if (ret) {
- memcpy(dev->name, oldname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
- - write_seqcount_end(&devnet_rename_seq);
- - return ret;
- + err = ret;
- + goto outunlock;
- }
-
- - write_seqcount_end(&devnet_rename_seq);
- + __raw_write_seqcount_end(&devnet_rename_seq);
- + mutex_unlock(&devnet_rename_mutex);
-
- netdev_adjacent_rename_links(dev, oldname);
-
- @@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *dev, const char *newname)
- /* err >= 0 after dev_alloc_name() or stores the first errno */
- if (err >= 0) {
- err = ret;
- - write_seqcount_begin(&devnet_rename_seq);
- + mutex_lock(&devnet_rename_mutex);
- + __raw_write_seqcount_begin(&devnet_rename_seq);
- memcpy(dev->name, oldname, IFNAMSIZ);
- memcpy(oldname, newname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
- @@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *dev, const char *newname)
- }
-
- return err;
- +
- +outunlock:
- + __raw_write_seqcount_end(&devnet_rename_seq);
- + mutex_unlock(&devnet_rename_mutex);
- + return err;
- }
-
- /**
- @@ -2287,6 +2293,7 @@ static void __netif_reschedule(struct Qdisc *q)
- sd->output_queue_tailp = &q->next_sched;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
-
- void __netif_schedule(struct Qdisc *q)
- @@ -2371,6 +2378,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
- __this_cpu_write(softnet_data.completion_queue, skb);
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(__dev_kfree_skb_irq);
-
- @@ -3112,7 +3120,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
- * This permits qdisc->running owner to get the lock more
- * often and dequeue packets faster.
- */
- +#ifdef CONFIG_PREEMPT_RT_FULL
- + contended = true;
- +#else
- contended = qdisc_is_running(q);
- +#endif
- if (unlikely(contended))
- spin_lock(&q->busylock);
-
- @@ -3175,8 +3187,10 @@ static void skb_update_prio(struct sk_buff *skb)
- #define skb_update_prio(skb)
- #endif
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- DEFINE_PER_CPU(int, xmit_recursion);
- EXPORT_SYMBOL(xmit_recursion);
- +#endif
-
- /**
- * dev_loopback_xmit - loop back @skb
- @@ -3410,8 +3424,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
- int cpu = smp_processor_id(); /* ok because BHs are off */
-
- if (txq->xmit_lock_owner != cpu) {
- - if (unlikely(__this_cpu_read(xmit_recursion) >
- - XMIT_RECURSION_LIMIT))
- + if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
- goto recursion_alert;
-
- skb = validate_xmit_skb(skb, dev);
- @@ -3421,9 +3434,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
- HARD_TX_LOCK(dev, txq, cpu);
-
- if (!netif_xmit_stopped(txq)) {
- - __this_cpu_inc(xmit_recursion);
- + xmit_rec_inc();
- skb = dev_hard_start_xmit(skb, dev, txq, &rc);
- - __this_cpu_dec(xmit_recursion);
- + xmit_rec_dec();
- if (dev_xmit_complete(rc)) {
- HARD_TX_UNLOCK(dev, txq);
- goto out;
- @@ -3797,6 +3810,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
- rps_unlock(sd);
-
- local_irq_restore(flags);
- + preempt_check_resched_rt();
-
- atomic_long_inc(&skb->dev->rx_dropped);
- kfree_skb(skb);
- @@ -3815,7 +3829,7 @@ static int netif_rx_internal(struct sk_buff *skb)
- struct rps_dev_flow voidflow, *rflow = &voidflow;
- int cpu;
-
- - preempt_disable();
- + migrate_disable();
- rcu_read_lock();
-
- cpu = get_rps_cpu(skb->dev, skb, &rflow);
- @@ -3825,13 +3839,13 @@ static int netif_rx_internal(struct sk_buff *skb)
- ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-
- rcu_read_unlock();
- - preempt_enable();
- + migrate_enable();
- } else
- #endif
- {
- unsigned int qtail;
- - ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
- - put_cpu();
- + ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
- + put_cpu_light();
- }
- return ret;
- }
- @@ -3865,11 +3879,9 @@ int netif_rx_ni(struct sk_buff *skb)
-
- trace_netif_rx_ni_entry(skb);
-
- - preempt_disable();
- + local_bh_disable();
- err = netif_rx_internal(skb);
- - if (local_softirq_pending())
- - do_softirq();
- - preempt_enable();
- + local_bh_enable();
-
- return err;
- }
- @@ -4348,7 +4360,7 @@ static void flush_backlog(struct work_struct *work)
- skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- __skb_unlink(skb, &sd->input_pkt_queue);
- - kfree_skb(skb);
- + __skb_queue_tail(&sd->tofree_queue, skb);
- input_queue_head_incr(sd);
- }
- }
- @@ -4358,11 +4370,14 @@ static void flush_backlog(struct work_struct *work)
- skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- __skb_unlink(skb, &sd->process_queue);
- - kfree_skb(skb);
- + __skb_queue_tail(&sd->tofree_queue, skb);
- input_queue_head_incr(sd);
- }
- }
- + if (!skb_queue_empty(&sd->tofree_queue))
- + raise_softirq_irqoff(NET_RX_SOFTIRQ);
- local_bh_enable();
- +
- }
-
- static void flush_all_backlogs(void)
- @@ -4853,6 +4868,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
- sd->rps_ipi_list = NULL;
-
- local_irq_enable();
- + preempt_check_resched_rt();
-
- /* Send pending IPI's to kick RPS processing on remote cpus. */
- while (remsd) {
- @@ -4866,6 +4882,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
- } else
- #endif
- local_irq_enable();
- + preempt_check_resched_rt();
- }
-
- static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
- @@ -4895,7 +4912,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
- while (again) {
- struct sk_buff *skb;
-
- + local_irq_disable();
- while ((skb = __skb_dequeue(&sd->process_queue))) {
- + local_irq_enable();
- rcu_read_lock();
- __netif_receive_skb(skb);
- rcu_read_unlock();
- @@ -4903,9 +4922,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
- if (++work >= quota)
- return work;
-
- + local_irq_disable();
- }
-
- - local_irq_disable();
- rps_lock(sd);
- if (skb_queue_empty(&sd->input_pkt_queue)) {
- /*
- @@ -4943,9 +4962,11 @@ void __napi_schedule(struct napi_struct *n)
- local_irq_save(flags);
- ____napi_schedule(this_cpu_ptr(&softnet_data), n);
- local_irq_restore(flags);
- + preempt_check_resched_rt();
- }
- EXPORT_SYMBOL(__napi_schedule);
-
- +#ifndef CONFIG_PREEMPT_RT_FULL
- /**
- * __napi_schedule_irqoff - schedule for receive
- * @n: entry to schedule
- @@ -4957,6 +4978,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
- ____napi_schedule(this_cpu_ptr(&softnet_data), n);
- }
- EXPORT_SYMBOL(__napi_schedule_irqoff);
- +#endif
-
- void __napi_complete(struct napi_struct *n)
- {
- @@ -5246,13 +5268,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
- unsigned long time_limit = jiffies + 2;
- int budget = netdev_budget;
- + struct sk_buff_head tofree_q;
- + struct sk_buff *skb;
- LIST_HEAD(list);
- LIST_HEAD(repoll);
-
- + __skb_queue_head_init(&tofree_q);
- +
- local_irq_disable();
- + skb_queue_splice_init(&sd->tofree_queue, &tofree_q);
- list_splice_init(&sd->poll_list, &list);
- local_irq_enable();
-
- + while ((skb = __skb_dequeue(&tofree_q)))
- + kfree_skb(skb);
- +
- for (;;) {
- struct napi_struct *n;
-
- @@ -5283,7 +5313,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
- list_splice_tail(&repoll, &list);
- list_splice(&list, &sd->poll_list);
- if (!list_empty(&sd->poll_list))
- - __raise_softirq_irqoff(NET_RX_SOFTIRQ);
- + __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
-
- net_rps_action_and_irq_enable(sd);
- }
- @@ -8045,16 +8075,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
-
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_enable();
- + preempt_check_resched_rt();
-
- /* Process offline CPU's input_pkt_queue */
- while ((skb = __skb_dequeue(&oldsd->process_queue))) {
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
- - while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
- + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
- netif_rx_ni(skb);
- input_queue_head_incr(oldsd);
- }
- + while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
- + kfree_skb(skb);
- + }
-
- return NOTIFY_OK;
- }
- @@ -8359,8 +8393,9 @@ static int __init net_dev_init(void)
-
- INIT_WORK(flush, flush_backlog);
-
- - skb_queue_head_init(&sd->input_pkt_queue);
- - skb_queue_head_init(&sd->process_queue);
- + skb_queue_head_init_raw(&sd->input_pkt_queue);
- + skb_queue_head_init_raw(&sd->process_queue);
- + skb_queue_head_init_raw(&sd->tofree_queue);
- INIT_LIST_HEAD(&sd->poll_list);
- sd->output_queue_tailp = &sd->output_queue;
- #ifdef CONFIG_RPS
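Note: the devnet_rename_seq change above moves the seqcount write side under devnet_rename_mutex, so a reader that hits a retry can take and release the mutex (blocking until the writer finishes) instead of spinning against a possibly preempted writer. A userspace sketch of that reader/writer shape, using GCC-style atomics for the sequence counter (illustrative, not the kernel API):

    #include <pthread.h>
    #include <string.h>

    static pthread_mutex_t rename_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int seq;            /* even: stable, odd: write in flight */
    static char name[16] = "eth0";

    static void write_name(const char *new_name)
    {
        pthread_mutex_lock(&rename_mutex);              /* serialises writers */
        __atomic_add_fetch(&seq, 1, __ATOMIC_RELEASE);  /* -> odd */
        strncpy(name, new_name, sizeof(name) - 1);
        __atomic_add_fetch(&seq, 1, __ATOMIC_RELEASE);  /* -> even */
        pthread_mutex_unlock(&rename_mutex);
    }

    static void read_name(char *out, size_t len)
    {
        for (;;) {
            unsigned int s = __atomic_load_n(&seq, __ATOMIC_ACQUIRE);

            if (s & 1) {                /* writer active: block, don't spin */
                pthread_mutex_lock(&rename_mutex);
                pthread_mutex_unlock(&rename_mutex);
                continue;
            }
            strncpy(out, name, len);
            if (s == __atomic_load_n(&seq, __ATOMIC_ACQUIRE))
                return;                 /* snapshot was consistent */
        }
    }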
- diff --git a/net/core/filter.c b/net/core/filter.c
- index 4eb4ce0aeef4..4f09d6a57217 100644
- --- a/net/core/filter.c
- +++ b/net/core/filter.c
- @@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
- {
- int ret;
-
- - if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
- + if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
- net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
- kfree_skb(skb);
- return -ENETDOWN;
- @@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
-
- skb->dev = dev;
-
- - __this_cpu_inc(xmit_recursion);
- + xmit_rec_inc();
- ret = dev_queue_xmit(skb);
- - __this_cpu_dec(xmit_recursion);
- + xmit_rec_dec();
-
- return ret;
- }
- diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
- index cad8e791f28e..2a9364fe62a5 100644
- --- a/net/core/gen_estimator.c
- +++ b/net/core/gen_estimator.c
- @@ -84,7 +84,7 @@ struct gen_estimator
- struct gnet_stats_basic_packed *bstats;
- struct gnet_stats_rate_est64 *rate_est;
- spinlock_t *stats_lock;
- - seqcount_t *running;
- + net_seqlock_t *running;
- int ewma_log;
- u32 last_packets;
- unsigned long avpps;
- @@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock,
- - seqcount_t *running,
- + net_seqlock_t *running,
- struct nlattr *opt)
- {
- struct gen_estimator *est;
- @@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock,
- - seqcount_t *running, struct nlattr *opt)
- + net_seqlock_t *running, struct nlattr *opt)
- {
- gen_kill_estimator(bstats, rate_est);
- return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
- diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
- index 508e051304fb..bc3b17b78c94 100644
- --- a/net/core/gen_stats.c
- +++ b/net/core/gen_stats.c
- @@ -130,7 +130,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
- }
-
- void
- -__gnet_stats_copy_basic(const seqcount_t *running,
- +__gnet_stats_copy_basic(net_seqlock_t *running,
- struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b)
- @@ -143,10 +143,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
- }
- do {
- if (running)
- - seq = read_seqcount_begin(running);
- + seq = net_seq_begin(running);
- bstats->bytes = b->bytes;
- bstats->packets = b->packets;
- - } while (running && read_seqcount_retry(running, seq));
- + } while (running && net_seq_retry(running, seq));
- }
- EXPORT_SYMBOL(__gnet_stats_copy_basic);
-
- @@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
- * if the room in the socket buffer was not sufficient.
- */
- int
- -gnet_stats_copy_basic(const seqcount_t *running,
- +gnet_stats_copy_basic(net_seqlock_t *running,
- struct gnet_dump *d,
- struct gnet_stats_basic_cpu __percpu *cpu,
- struct gnet_stats_basic_packed *b)
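- 
- net_seqlock_t and the net_seq_begin()/net_seq_retry() readers used here
- are supplied by this series (include/net/net_seq_lock.h): on RT the qdisc
- running count becomes a full seqlock_t, whose write side is a real lock a
- blocked reader can wait on, while non-RT keeps the bare seqcount_t.
- Approximately:
- 
-     #ifdef CONFIG_PREEMPT_RT_BASE
-     # define net_seqlock_t                 seqlock_t
-     # define net_seq_begin(__r)            read_seqbegin(__r)
-     # define net_seq_retry(__r, __s)       read_seqretry(__r, __s)
-     #else
-     # define net_seqlock_t                 seqcount_t
-     # define net_seq_begin(__r)            read_seqcount_begin(__r)
-     # define net_seq_retry(__r, __s)       read_seqcount_retry(__r, __s)
-     #endif
- 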
- diff --git a/net/core/skbuff.c b/net/core/skbuff.c
- index a64515583bc1..fec448d29f42 100644
- --- a/net/core/skbuff.c
- +++ b/net/core/skbuff.c
- @@ -64,6 +64,7 @@
- #include <linux/errqueue.h>
- #include <linux/prefetch.h>
- #include <linux/if_vlan.h>
- +#include <linux/locallock.h>
-
- #include <net/protocol.h>
- #include <net/dst.h>
- @@ -360,6 +361,8 @@ struct napi_alloc_cache {
-
- static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
- static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
- +static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
- +static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
-
- static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
- {
- @@ -367,10 +370,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
- unsigned long flags;
- void *data;
-
- - local_irq_save(flags);
- + local_lock_irqsave(netdev_alloc_lock, flags);
- nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, fragsz, gfp_mask);
- - local_irq_restore(flags);
- + local_unlock_irqrestore(netdev_alloc_lock, flags);
- return data;
- }
-
- @@ -389,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
-
- static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
- {
- - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- + struct napi_alloc_cache *nc;
- + void *data;
-
- - return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
- + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- + data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
- + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- + return data;
- }
-
- void *napi_alloc_frag(unsigned int fragsz)
- @@ -438,13 +445,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
- if (sk_memalloc_socks())
- gfp_mask |= __GFP_MEMALLOC;
-
- - local_irq_save(flags);
- + local_lock_irqsave(netdev_alloc_lock, flags);
-
- nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, len, gfp_mask);
- pfmemalloc = nc->pfmemalloc;
-
- - local_irq_restore(flags);
- + local_unlock_irqrestore(netdev_alloc_lock, flags);
-
- if (unlikely(!data))
- return NULL;
- @@ -485,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
- struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
- gfp_t gfp_mask)
- {
- - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- + struct napi_alloc_cache *nc;
- struct sk_buff *skb;
- void *data;
- + bool pfmemalloc;
-
- len += NET_SKB_PAD + NET_IP_ALIGN;
-
- @@ -505,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
- if (sk_memalloc_socks())
- gfp_mask |= __GFP_MEMALLOC;
-
- + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- data = __alloc_page_frag(&nc->page, len, gfp_mask);
- + pfmemalloc = nc->page.pfmemalloc;
- + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- if (unlikely(!data))
- return NULL;
-
- @@ -516,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
- }
-
- /* use OR instead of assignment to avoid clearing of bits in mask */
- - if (nc->page.pfmemalloc)
- + if (pfmemalloc)
- skb->pfmemalloc = 1;
- skb->head_frag = 1;
-
- @@ -760,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
-
- void __kfree_skb_flush(void)
- {
- - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- + struct napi_alloc_cache *nc;
-
- + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- /* flush skb_cache if containing objects */
- if (nc->skb_count) {
- kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
- nc->skb_cache);
- nc->skb_count = 0;
- }
- + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- }
-
- static inline void _kfree_skb_defer(struct sk_buff *skb)
- {
- - struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- + struct napi_alloc_cache *nc;
-
- /* drop skb->head and call any destructors for packet */
- skb_release_all(skb);
-
- + nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- /* record skb to CPU local list */
- nc->skb_cache[nc->skb_count++] = skb;
-
- @@ -791,6 +805,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
- nc->skb_cache);
- nc->skb_count = 0;
- }
- + put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- }
- void __kfree_skb_defer(struct sk_buff *skb)
- {
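- 
- Every skbuff.c change above is the same substitution: a bare
- local_irq_save() or this_cpu_ptr() becomes a named local lock. On non-RT
- kernels the local-lock primitives compile down to the old IRQ-off or
- preempt-off behaviour; on RT they become a per-CPU sleeping spinlock, so
- the section stays preemptible while access to the per-CPU data remains
- serialized. A sketch of the use-site pattern, mirroring __napi_alloc_frag()
- above with illustrative example_* names:
- 
-     #include <linux/locallock.h>
- 
-     static DEFINE_PER_CPU(struct napi_alloc_cache, example_cache);
-     static DEFINE_LOCAL_IRQ_LOCK(example_lock);
- 
-     static void *example_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
-     {
-             struct napi_alloc_cache *nc;
-             void *data;
- 
-             /* Take the lock, then fetch this CPU's instance. */
-             nc = &get_locked_var(example_lock, example_cache);
-             data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
-             put_locked_var(example_lock, example_cache);
-             return data;
-     }
- 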
- diff --git a/net/core/sock.c b/net/core/sock.c
- index e3b60460dc9c..8d15848c3a22 100644
- --- a/net/core/sock.c
- +++ b/net/core/sock.c
- @@ -2493,12 +2493,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
- if (sk->sk_lock.owned)
- __lock_sock(sk);
- sk->sk_lock.owned = 1;
- - spin_unlock(&sk->sk_lock.slock);
- + spin_unlock_bh(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
- - local_bh_enable();
- }
- EXPORT_SYMBOL(lock_sock_nested);
-
- diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
- index 31f17f0bbd1c..c9525356823c 100644
- --- a/net/ipv4/icmp.c
- +++ b/net/ipv4/icmp.c
- @@ -69,6 +69,7 @@
- #include <linux/jiffies.h>
- #include <linux/kernel.h>
- #include <linux/fcntl.h>
- +#include <linux/sysrq.h>
- #include <linux/socket.h>
- #include <linux/in.h>
- #include <linux/inet.h>
- @@ -77,6 +78,7 @@
- #include <linux/string.h>
- #include <linux/netfilter_ipv4.h>
- #include <linux/slab.h>
- +#include <linux/locallock.h>
- #include <net/snmp.h>
- #include <net/ip.h>
- #include <net/route.h>
- @@ -204,6 +206,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
- *
- * On SMP we have one ICMP socket per-cpu.
- */
- +static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
- +
- static struct sock *icmp_sk(struct net *net)
- {
- return *this_cpu_ptr(net->ipv4.icmp_sk);
- @@ -215,12 +219,18 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
-
- local_bh_disable();
-
- + if (!local_trylock(icmp_sk_lock)) {
- + local_bh_enable();
- + return NULL;
- + }
- +
- sk = icmp_sk(net);
-
- if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
- /* This can happen if the output path signals a
- * dst_link_failure() for an outgoing ICMP packet.
- */
- + local_unlock(icmp_sk_lock);
- local_bh_enable();
- return NULL;
- }
- @@ -230,6 +240,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
- static inline void icmp_xmit_unlock(struct sock *sk)
- {
- spin_unlock_bh(&sk->sk_lock.slock);
- + local_unlock(icmp_sk_lock);
- }
-
- int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
- @@ -358,6 +369,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
- struct sock *sk;
- struct sk_buff *skb;
-
- + local_lock(icmp_sk_lock);
- sk = icmp_sk(dev_net((*rt)->dst.dev));
- if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
- icmp_param->data_len+icmp_param->head_len,
- @@ -380,6 +392,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
- skb->ip_summed = CHECKSUM_NONE;
- ip_push_pending_frames(sk, fl4);
- }
- + local_unlock(icmp_sk_lock);
- }
-
- /*
- @@ -899,6 +912,30 @@ static bool icmp_redirect(struct sk_buff *skb)
- return true;
- }
-
- +/*
- + * 32bit and 64bit have different timestamp lengths, so we check for
- + * the cookie at offset 20 and verify it is repeated at offset 50
- + */
- +#define CO_POS0 20
- +#define CO_POS1 50
- +#define CO_SIZE sizeof(int)
- +#define ICMP_SYSRQ_SIZE 57
- +
- +/*
- + * We got an ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
- + * pattern and, if it matches, send the next byte as a trigger to sysrq.
- + */
- +static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
- +{
- + int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
- + char *p = skb->data;
- +
- + if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
- + !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
- + p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
- + handle_sysrq(p[CO_POS0 + CO_SIZE]);
- +}
- +
- /*
- * Handle ICMP_ECHO ("ping") requests.
- *
- @@ -926,6 +963,11 @@ static bool icmp_echo(struct sk_buff *skb)
- icmp_param.data_len = skb->len;
- icmp_param.head_len = sizeof(struct icmphdr);
- icmp_reply(&icmp_param, skb);
- +
- + if (skb->len == ICMP_SYSRQ_SIZE &&
- + net->ipv4.sysctl_icmp_echo_sysrq) {
- + icmp_check_sysrq(net, skb);
- + }
- }
- /* should there be an ICMP stat for ignored echos? */
- return true;
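- 
- To exercise this hook, the receiving host sets the cookie via the
- net.ipv4.icmp_echo_sysrq sysctl (added in the next hunk) and the sender
- transmits a 57-byte echo request carrying the cookie at payload offsets 20
- and 50, each followed by the sysrq command byte. An illustrative userspace
- helper, not part of the patch, that builds such a payload:
- 
-     #include <stdint.h>
-     #include <string.h>
-     #include <arpa/inet.h>
- 
-     #define CO_POS0         20
-     #define CO_POS1         50
-     #define ICMP_SYSRQ_SIZE 57
- 
-     /* Fill buf (ICMP_SYSRQ_SIZE bytes) so icmp_check_sysrq() matches:
-      * the cookie in network byte order at both offsets, with the same
-      * sysrq command byte directly after each copy. */
-     static void build_sysrq_payload(unsigned char *buf, uint32_t cookie, char cmd)
-     {
-             uint32_t c = htonl(cookie);
- 
-             memset(buf, 0, ICMP_SYSRQ_SIZE);
-             memcpy(buf + CO_POS0, &c, sizeof(c));
-             memcpy(buf + CO_POS1, &c, sizeof(c));
-             buf[CO_POS0 + sizeof(c)] = cmd;
-             buf[CO_POS1 + sizeof(c)] = cmd;
-     }
- 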
- diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
- index 566cfc50f7cf..4b8551d78a3b 100644
- --- a/net/ipv4/sysctl_net_ipv4.c
- +++ b/net/ipv4/sysctl_net_ipv4.c
- @@ -680,6 +680,13 @@ static struct ctl_table ipv4_net_table[] = {
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- + {
- + .procname = "icmp_echo_sysrq",
- + .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
- + .maxlen = sizeof(int),
- + .mode = 0644,
- + .proc_handler = proc_dointvec
- + },
- {
- .procname = "icmp_ignore_bogus_error_responses",
- .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
- diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
- index b3960738464e..17699390a324 100644
- --- a/net/ipv4/tcp_ipv4.c
- +++ b/net/ipv4/tcp_ipv4.c
- @@ -62,6 +62,7 @@
- #include <linux/init.h>
- #include <linux/times.h>
- #include <linux/slab.h>
- +#include <linux/locallock.h>
-
- #include <net/net_namespace.h>
- #include <net/icmp.h>
- @@ -568,6 +569,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
- }
- EXPORT_SYMBOL(tcp_v4_send_check);
-
- +static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
- /*
- * This routine will send an RST to the other tcp.
- *
- @@ -695,7 +697,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
- offsetof(struct inet_timewait_sock, tw_bound_dev_if));
-
- arg.tos = ip_hdr(skb)->tos;
- +
- local_bh_disable();
- + local_lock(tcp_sk_lock);
- ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
- skb, &TCP_SKB_CB(skb)->header.h4.opt,
- ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
- @@ -703,6 +707,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
-
- __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
- __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
- + local_unlock(tcp_sk_lock);
- local_bh_enable();
-
- #ifdef CONFIG_TCP_MD5SIG
- @@ -780,12 +785,14 @@ static void tcp_v4_send_ack(struct net *net,
- arg.bound_dev_if = oif;
- arg.tos = tos;
- local_bh_disable();
- + local_lock(tcp_sk_lock);
- ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
- skb, &TCP_SKB_CB(skb)->header.h4.opt,
- ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
- &arg, arg.iov[0].iov_len);
-
- __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
- + local_unlock(tcp_sk_lock);
- local_bh_enable();
- }
-
- diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
- index 439e597fd374..ca0daeaff370 100644
- --- a/net/mac80211/rx.c
- +++ b/net/mac80211/rx.c
- @@ -4229,7 +4229,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
- struct ieee80211_supported_band *sband;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
- - WARN_ON_ONCE(softirq_count() == 0);
- + WARN_ON_ONCE_NONRT(softirq_count() == 0);
-
- if (WARN_ON(status->band >= NUM_NL80211_BANDS))
- goto drop;
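- 
- WARN_ON_ONCE_NONRT() belongs to the *_NONRT assertion family added earlier
- in the series: the check only fires on non-RT kernels, since on RT this
- receive path legitimately runs in thread context with softirqs enabled.
- Roughly:
- 
-     #ifdef CONFIG_PREEMPT_RT_BASE
-     # define WARN_ON_ONCE_NONRT(condition) do { } while (0)
-     #else
-     # define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
-     #endif
- 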
- diff --git a/net/netfilter/core.c b/net/netfilter/core.c
- index d869ea50623e..5cafa87b030b 100644
- --- a/net/netfilter/core.c
- +++ b/net/netfilter/core.c
- @@ -22,12 +22,18 @@
- #include <linux/proc_fs.h>
- #include <linux/mutex.h>
- #include <linux/slab.h>
- +#include <linux/locallock.h>
- #include <linux/rcupdate.h>
- #include <net/net_namespace.h>
- #include <net/sock.h>
-
- #include "nf_internals.h"
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- +DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
- +EXPORT_PER_CPU_SYMBOL(xt_write_lock);
- +#endif
- +
- static DEFINE_MUTEX(afinfo_mutex);
-
- const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
- diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
- index 267db0d603bc..00994de54d57 100644
- --- a/net/packet/af_packet.c
- +++ b/net/packet/af_packet.c
- @@ -63,6 +63,7 @@
- #include <linux/if_packet.h>
- #include <linux/wireless.h>
- #include <linux/kernel.h>
- +#include <linux/delay.h>
- #include <linux/kmod.h>
- #include <linux/slab.h>
- #include <linux/vmalloc.h>
- @@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
- if (BLOCK_NUM_PKTS(pbd)) {
- while (atomic_read(&pkc->blk_fill_in_prog)) {
- /* Waiting for skb_copy_bits to finish... */
- - cpu_relax();
- + cpu_chill();
- }
- }
-
- @@ -956,7 +957,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
- if (!(status & TP_STATUS_BLK_TMO)) {
- while (atomic_read(&pkc->blk_fill_in_prog)) {
- /* Waiting for skb_copy_bits to finish... */
- - cpu_relax();
- + cpu_chill();
- }
- }
- prb_close_block(pkc, pbd, po, status);
- diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
- index 977f69886c00..f3e7a36b0396 100644
- --- a/net/rds/ib_rdma.c
- +++ b/net/rds/ib_rdma.c
- @@ -34,6 +34,7 @@
- #include <linux/slab.h>
- #include <linux/rculist.h>
- #include <linux/llist.h>
- +#include <linux/delay.h>
-
- #include "rds_single_path.h"
- #include "ib_mr.h"
- @@ -210,7 +211,7 @@ static inline void wait_clean_list_grace(void)
- for_each_online_cpu(cpu) {
- flag = &per_cpu(clean_list_grace, cpu);
- while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
- - cpu_relax();
- + cpu_chill();
- }
- }
-
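- The cpu_relax() to cpu_chill() conversions above (af_packet.c and
- ib_rdma.c) address the same RT hazard: the spinning task may have
- preempted the very writer it is polling, so busy-waiting can live-lock.
- cpu_chill() sleeps instead. A hedged sketch of the idea; the series' real
- implementation lives in the hrtimer code and uses an hrtimer nanosleep:
- 
-     /* Sketch: back off for roughly a millisecond so a preempted,
-      * lower-priority writer can finish. */
-     void cpu_chill(void)
-     {
-             ktime_t chill_time = ktime_set(0, NSEC_PER_MSEC);
- 
-             set_current_state(TASK_UNINTERRUPTIBLE);
-             schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL);
-     }
- 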
- diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
- index 7d921e56e715..13df56a738e5 100644
- --- a/net/rxrpc/security.c
- +++ b/net/rxrpc/security.c
- @@ -19,9 +19,6 @@
- #include <keys/rxrpc-type.h>
- #include "ar-internal.h"
-
- -static LIST_HEAD(rxrpc_security_methods);
- -static DECLARE_RWSEM(rxrpc_security_sem);
- -
- static const struct rxrpc_security *rxrpc_security_types[] = {
- [RXRPC_SECURITY_NONE] = &rxrpc_no_security,
- #ifdef CONFIG_RXKAD
- diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
- index ea13df1be067..76c20745b502 100644
- --- a/net/sched/sch_api.c
- +++ b/net/sched/sch_api.c
- @@ -980,7 +980,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
- rcu_assign_pointer(sch->stab, stab);
- }
- if (tca[TCA_RATE]) {
- - seqcount_t *running;
- + net_seqlock_t *running;
-
- err = -EOPNOTSUPP;
- if (sch->flags & TCQ_F_MQROOT)
- diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
- index 9016c8baf2aa..d925f0e63679 100644
- --- a/net/sched/sch_generic.c
- +++ b/net/sched/sch_generic.c
- @@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
- .ops = &noop_qdisc_ops,
- .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
- .dev_queue = &noop_netdev_queue,
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + .running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
- +#else
- .running = SEQCNT_ZERO(noop_qdisc.running),
- +#endif
- .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
- };
- EXPORT_SYMBOL(noop_qdisc);
- @@ -624,9 +628,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- lockdep_set_class(&sch->busylock,
- dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
- +#ifdef CONFIG_PREEMPT_RT_BASE
- + seqlock_init(&sch->running);
- + lockdep_set_class(&sch->running.seqcount,
- + dev->qdisc_running_key ?: &qdisc_running_key);
- + lockdep_set_class(&sch->running.lock,
- + dev->qdisc_running_key ?: &qdisc_running_key);
- +#else
- seqcount_init(&sch->running);
- lockdep_set_class(&sch->running,
- dev->qdisc_running_key ?: &qdisc_running_key);
- +#endif
-
- sch->ops = ops;
- sch->enqueue = ops->enqueue;
- @@ -926,7 +938,7 @@ void dev_deactivate_many(struct list_head *head)
- /* Wait for outstanding qdisc_run calls. */
- list_for_each_entry(dev, head, close_list)
- while (some_qdisc_is_busy(dev))
- - yield();
- + msleep(1);
- }
-
- void dev_deactivate(struct net_device *dev)
- diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
- index 9c9db55a0c1e..e6583b018a72 100644
- --- a/net/sunrpc/svc_xprt.c
- +++ b/net/sunrpc/svc_xprt.c
- @@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
- goto out;
- }
-
- - cpu = get_cpu();
- + cpu = get_cpu_light();
- pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-
- atomic_long_inc(&pool->sp_stats.packets);
- @@ -432,7 +432,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
-
- atomic_long_inc(&pool->sp_stats.threads_woken);
- wake_up_process(rqstp->rq_task);
- - put_cpu();
- + put_cpu_light();
- goto out;
- }
- rcu_read_unlock();
- @@ -453,7 +453,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
- goto redo_search;
- }
- rqstp = NULL;
- - put_cpu();
- + put_cpu_light();
- out:
- trace_svc_xprt_do_enqueue(xprt, rqstp);
- }
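- 
- get_cpu_light()/put_cpu_light() are the series' migration-only
- counterparts of get_cpu()/put_cpu(): the task stays on its CPU but remains
- preemptible, which is all svc_pool_for_cpu() needs here. Approximately:
- 
-     #ifdef CONFIG_PREEMPT_RT_FULL
-     # define get_cpu_light()        ({ migrate_disable(); smp_processor_id(); })
-     # define put_cpu_light()        migrate_enable()
-     #else
-     # define get_cpu_light()        get_cpu()
-     # define put_cpu_light()        put_cpu()
-     #endif
- 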
- diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
- index 6fdc97ef6023..523e0420d7f0 100755
- --- a/scripts/mkcompile_h
- +++ b/scripts/mkcompile_h
- @@ -4,7 +4,8 @@ TARGET=$1
- ARCH=$2
- SMP=$3
- PREEMPT=$4
- -CC=$5
- +RT=$5
- +CC=$6
-
- vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-
- @@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
- CONFIG_FLAGS=""
- if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
- if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
- +if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
- UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
-
- # Truncate to maximum length
- diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
- index 9d33c1e85c79..3d307bda86f9 100644
- --- a/sound/core/pcm_native.c
- +++ b/sound/core/pcm_native.c
- @@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
- void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
- {
- if (!substream->pcm->nonatomic)
- - local_irq_disable();
- + local_irq_disable_nort();
- snd_pcm_stream_lock(substream);
- }
- EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
- @@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
- {
- snd_pcm_stream_unlock(substream);
- if (!substream->pcm->nonatomic)
- - local_irq_enable();
- + local_irq_enable_nort();
- }
- EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
-
- @@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
- {
- unsigned long flags = 0;
- if (!substream->pcm->nonatomic)
- - local_irq_save(flags);
- + local_irq_save_nort(flags);
- snd_pcm_stream_lock(substream);
- return flags;
- }
- @@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
- {
- snd_pcm_stream_unlock(substream);
- if (!substream->pcm->nonatomic)
- - local_irq_restore(flags);
- + local_irq_restore_nort(flags);
- }
- EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
-
|