rmk.patch 376 KB

  1. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts linux-3.15-rc1/arch/arm/boot/dts/imx6dl-hummingboard.dts
  2. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts 2014-04-13 23:18:35.000000000 +0200
  3. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6dl-hummingboard.dts 2014-04-25 14:11:13.515375059 +0200
  4. @@ -67,6 +67,14 @@
  5. status = "okay";
  6. };
  7. +&hdmi {
  8. + pinctrl-names = "default";
  9. + pinctrl-0 = <&pinctrl_hummingboard_hdmi>;
  10. + ddc-i2c-bus = <&i2c2>;
  11. + status = "okay";
  12. + crtcs = <&ipu1 0>;
  13. +};
  14. +
  15. &i2c1 {
  16. pinctrl-names = "default";
  17. pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
  18. @@ -82,6 +90,13 @@
  19. */
  20. };
  21. +&i2c2 {
  22. + clock-frequency = <100000>;
  23. + pinctrl-names = "default";
  24. + pinctrl-0 = <&pinctrl_hummingboard_i2c2>;
  25. + status = "okay";
  26. +};
  27. +
  28. &iomuxc {
  29. hummingboard {
  30. pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
  31. @@ -97,6 +112,12 @@
  32. >;
  33. };
  34. + pinctrl_hummingboard_hdmi: hummingboard-hdmi {
  35. + fsl,pins = <
  36. + MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
  37. + >;
  38. + };
  39. +
  40. pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
  41. fsl,pins = <
  42. MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
  43. @@ -104,6 +125,13 @@
  44. >;
  45. };
  46. + pinctrl_hummingboard_i2c2: hummingboard-i2c2 {
  47. + fsl,pins = <
  48. + MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
  49. + MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
  50. + >;
  51. + };
  52. +
  53. pinctrl_hummingboard_spdif: hummingboard-spdif {
  54. fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
  55. };
  56. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6q-cubox-i.dts linux-3.15-rc1/arch/arm/boot/dts/imx6q-cubox-i.dts
  57. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6q-cubox-i.dts 2014-04-13 23:18:35.000000000 +0200
  58. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6q-cubox-i.dts 2014-04-25 14:11:13.515375059 +0200
  59. @@ -13,4 +13,8 @@
  60. &sata {
  61. status = "okay";
  62. + fsl,transmit-level-mV = <1104>;
  63. + fsl,transmit-boost-mdB = <0>;
  64. + fsl,transmit-atten-16ths = <9>;
  65. + fsl,no-spread-spectrum;
  66. };
  67. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-3.15-rc1/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
  68. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2014-04-13 23:18:35.000000000 +0200
  69. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2014-04-25 14:11:13.515375059 +0200
  70. @@ -12,6 +12,19 @@
  71. pinctrl-0 = <&pinctrl_cubox_i_ir>;
  72. };
  73. + pwmleds {
  74. + compatible = "pwm-leds";
  75. + pinctrl-names = "default";
  76. + pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
  77. +
  78. + front {
  79. + active-low;
  80. + label = "imx6:red:front";
  81. + max-brightness = <248>;
  82. + pwms = <&pwm1 0 50000>;
  83. + };
  84. + };
  85. +
  86. regulators {
  87. compatible = "simple-bus";
  88. @@ -55,6 +68,21 @@
  89. };
  90. };
  91. +&hdmi {
  92. + pinctrl-names = "default";
  93. + pinctrl-0 = <&pinctrl_cubox_i_hdmi>;
  94. + ddc-i2c-bus = <&i2c2>;
  95. + status = "okay";
  96. + crtcs = <&ipu1 0>;
  97. +};
  98. +
  99. +&i2c2 {
  100. + clock-frequency = <100000>;
  101. + pinctrl-names = "default";
  102. + pinctrl-0 = <&pinctrl_cubox_i_i2c2>;
  103. + status = "okay";
  104. +};
  105. +
  106. &i2c3 {
  107. pinctrl-names = "default";
  108. pinctrl-0 = <&pinctrl_cubox_i_i2c3>;
  109. @@ -69,6 +97,19 @@
  110. &iomuxc {
  111. cubox_i {
  112. + pinctrl_cubox_i_hdmi: cubox-i-hdmi {
  113. + fsl,pins = <
  114. + MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
  115. + >;
  116. + };
  117. +
  118. + pinctrl_cubox_i_i2c2: cubox-i-i2c2 {
  119. + fsl,pins = <
  120. + MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
  121. + MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
  122. + >;
  123. + };
  124. +
  125. pinctrl_cubox_i_i2c3: cubox-i-i2c3 {
  126. fsl,pins = <
  127. MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
  128. @@ -82,6 +123,10 @@
  129. >;
  130. };
  131. + pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
  132. + fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
  133. + };
  134. +
  135. pinctrl_cubox_i_spdif: cubox-i-spdif {
  136. fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
  137. };
  138. @@ -111,6 +156,28 @@
  139. MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
  140. >;
  141. };
  142. +
  143. + pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz {
  144. + fsl,pins = <
  145. + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
  146. + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
  147. + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
  148. + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
  149. + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
  150. + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9
  151. + >;
  152. + };
  153. +
  154. + pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz {
  155. + fsl,pins = <
  156. + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
  157. + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
  158. + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
  159. + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
  160. + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
  161. + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9
  162. + >;
  163. + };
  164. };
  165. };
  166. @@ -130,9 +197,19 @@
  167. status = "okay";
  168. };
  169. +&uart4 {
  170. + status = "okay";
  171. +};
  172. +
  173. +&usdhc1 {
  174. + status = "okay";
  175. +};
  176. +
  177. &usdhc2 {
  178. - pinctrl-names = "default";
  179. + pinctrl-names = "default", "state_100mhz", "state_200mhz";
  180. pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
  181. + pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>;
  182. + pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>;
  183. vmmc-supply = <&reg_3p3v>;
  184. cd-gpios = <&gpio1 4 0>;
  185. status = "okay";
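
The usdhc2 node now carries three pinctrl states so the MMC host can re-drive the pads when it enters UHS signalling; the extra states reuse the aux pins and swap only the CMD/CLK/DATA pad settings. Roughly, a host driver hops between them with the generic pinctrl consumer API, as in the sketch below (struct and function names are placeholders, error handling trimmed):

    /* Sketch only: generic pinctrl consumer calls a host driver can use
     * to switch between the usdhc2 pad states defined above. */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    struct my_host {
        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_100mhz;
        struct pinctrl_state *pins_200mhz;
    };

    static int my_host_init_pinctrl(struct device *dev, struct my_host *host)
    {
        host->pinctrl = devm_pinctrl_get(dev);
        if (IS_ERR(host->pinctrl))
            return PTR_ERR(host->pinctrl);

        host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
        host->pins_100mhz  = pinctrl_lookup_state(host->pinctrl, "state_100mhz");
        host->pins_200mhz  = pinctrl_lookup_state(host->pinctrl, "state_200mhz");
        return 0;
    }

    /* e.g. before switching the card to a 100MHz UHS mode */
    static int my_host_set_100mhz_pads(struct my_host *host)
    {
        return pinctrl_select_state(host->pinctrl, host->pins_100mhz);
    }
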
  186. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl.dtsi linux-3.15-rc1/arch/arm/boot/dts/imx6qdl.dtsi
  187. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl.dtsi 2014-04-13 23:18:35.000000000 +0200
  188. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6qdl.dtsi 2014-04-25 14:11:13.515375059 +0200
  189. @@ -125,6 +125,8 @@
  190. cache-level = <2>;
  191. arm,tag-latency = <4 2 3>;
  192. arm,data-latency = <4 2 3>;
  193. + arm,dynamic-clk-gating;
  194. + arm,standby-mode;
  195. };
  196. pcie: pcie@0x01000000 {
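
The two new L2 cache properties ask the cache driver to set the dynamic clock gating and standby enable bits of the PL310 power control register - the same two bits the mach-imx system.c hunk further down stops programming by hand. Assuming the driver maps the properties one-to-one onto those bits, the resulting register value is easy to check standalone:

    /* Standalone sketch: power-control value requested by the two new
     * properties, using the bit values from the cache-l2x0.h hunk below.
     * Build: cc -o l310_pwr l310_pwr.c && ./l310_pwr */
    #include <stdbool.h>
    #include <stdio.h>

    #define L310_DYNAMIC_CLK_GATING_EN  (1 << 1)
    #define L310_STNDBY_MODE_EN         (1 << 0)

    static unsigned l310_power_ctrl(bool dynamic_clk_gating, bool standby_mode)
    {
        unsigned val = 0;

        if (dynamic_clk_gating)     /* arm,dynamic-clk-gating */
            val |= L310_DYNAMIC_CLK_GATING_EN;
        if (standby_mode)           /* arm,standby-mode */
            val |= L310_STNDBY_MODE_EN;
        return val;
    }

    int main(void)
    {
        /* Both properties present, as in the imx6qdl.dtsi hunk above */
        printf("L310 power ctrl = 0x%x\n", l310_power_ctrl(true, true)); /* 0x3 */
        return 0;
    }
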
  197. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi linux-3.15-rc1/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
  198. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi 2014-04-13 23:18:35.000000000 +0200
  199. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi 2014-04-25 14:11:13.515375059 +0200
  200. @@ -26,25 +26,25 @@
  201. /* GPIO16 -> AR8035 25MHz */
  202. MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0xc0000000
  203. MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x80000000
  204. - MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
  205. - MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
  206. - MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
  207. - MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
  208. - MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
  209. + MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
  210. + MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
  211. + MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
  212. + MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
  213. + MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
  214. /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
  215. MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x0a0b1
  216. /* AR8035 pin strapping: IO voltage: pull up */
  217. - MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
  218. + MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
  219. /* AR8035 pin strapping: PHYADDR#0: pull down */
  220. - MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x130b0
  221. + MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030
  222. /* AR8035 pin strapping: PHYADDR#1: pull down */
  223. - MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x130b0
  224. + MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030
  225. /* AR8035 pin strapping: MODE#1: pull up */
  226. - MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
  227. + MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
  228. /* AR8035 pin strapping: MODE#3: pull up */
  229. - MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
  230. + MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
  231. /* AR8035 pin strapping: MODE#0: pull down */
  232. - MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x130b0
  233. + MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030
  234. /*
  235. * As the RMII pins are also connected to RGMII
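
The RGMII pad values change in exactly one field: 0x1b0b0 becomes 0x1b030 (and 0x130b0 becomes 0x13030), which clears bit 7 and drops the SPEED field from 2 to 0 while leaving the pulls and drive strength alone. A small standalone decoder makes that visible; the field layout below is the i.MX6 SW_PAD_CTL layout as read from the reference manual, so treat it as an assumption of this note rather than part of the patch:

    /* Standalone decoder for the fsl,pins pad-control words used above.
     * Assumed i.MX6 layout: HYS=16, PUS=[15:14], PUE=13, PKE=12, ODE=11,
     * SPEED=[7:6], DSE=[5:3], SRE=0.
     * Build: cc -o padctl padctl.c && ./padctl */
    #include <stdio.h>

    static void decode(unsigned v)
    {
        printf("0x%05x: HYS=%u PUS=%u PUE=%u PKE=%u ODE=%u "
               "SPEED=%u DSE=%u SRE=%u\n", v,
               (v >> 16) & 1, (v >> 14) & 3, (v >> 13) & 1, (v >> 12) & 1,
               (v >> 11) & 1, (v >> 6) & 3, (v >> 3) & 7, v & 1);
    }

    int main(void)
    {
        decode(0x1b0b0);    /* old RGMII setting: SPEED=2 */
        decode(0x1b030);    /* new RGMII setting: SPEED=0, rest identical */
        decode(0x130b0);    /* old pull-down strap value */
        decode(0x13030);    /* new pull-down strap value */
        return 0;
    }
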
  236. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-3.15-rc1/arch/arm/boot/dts/imx6qdl-microsom.dtsi
  237. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2014-04-13 23:18:35.000000000 +0200
  238. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2014-04-25 14:11:13.515375059 +0200
  239. @@ -1,9 +1,69 @@
  240. /*
  241. * Copyright (C) 2013,2014 Russell King
  242. */
  243. +#include <dt-bindings/gpio/gpio.h>
  244. +/ {
  245. + regulators {
  246. + compatible = "simple-bus";
  247. +
  248. + reg_brcm_osc: brcm-osc-reg {
  249. + compatible = "regulator-fixed";
  250. + enable-active-high;
  251. + gpio = <&gpio5 5 0>;
  252. + pinctrl-names = "default";
  253. + pinctrl-0 = <&pinctrl_microsom_brcm_osc_reg>;
  254. + regulator-name = "brcm_osc_reg";
  255. + regulator-min-microvolt = <3300000>;
  256. + regulator-max-microvolt = <3300000>;
  257. + regulator-always-on;
  258. + regulator-boot-on;
  259. + };
  260. +
  261. + reg_brcm: brcm-reg {
  262. + compatible = "regulator-fixed";
  263. + enable-active-high;
  264. + gpio = <&gpio3 19 0>;
  265. + pinctrl-names = "default";
  266. + pinctrl-0 = <&pinctrl_microsom_brcm_reg>;
  267. + regulator-name = "brcm_reg";
  268. + regulator-min-microvolt = <3300000>;
  269. + regulator-max-microvolt = <3300000>;
  270. + startup-delay-us = <200000>;
  271. + };
  272. + };
  273. +};
  274. &iomuxc {
  275. microsom {
  276. + pinctrl_microsom_brcm_osc_reg: microsom-brcm-osc-reg {
  277. + fsl,pins = <
  278. + MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070
  279. + >;
  280. + };
  281. +
  282. + pinctrl_microsom_brcm_reg: microsom-brcm-reg {
  283. + fsl,pins = <
  284. + MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070
  285. + >;
  286. + };
  287. +
  288. + pinctrl_microsom_brcm_wifi: microsom-brcm-wifi {
  289. + fsl,pins = <
  290. + MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0
  291. + MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070
  292. + MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070
  293. + MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070
  294. + >;
  295. + };
  296. +
  297. + pinctrl_microsom_brcm_bt: microsom-brcm-bt {
  298. + fsl,pins = <
  299. + MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070
  300. + MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070
  301. + MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070
  302. + >;
  303. + };
  304. +
  305. pinctrl_microsom_uart1: microsom-uart1 {
  306. fsl,pins = <
  307. MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
  308. @@ -11,6 +71,15 @@
  309. >;
  310. };
  311. + pinctrl_microsom_uart4_1: microsom-uart4 {
  312. + fsl,pins = <
  313. + MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
  314. + MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
  315. + MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
  316. + MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
  317. + >;
  318. + };
  319. +
  320. pinctrl_microsom_usbotg: microsom-usbotg {
  321. /*
  322. * Similar to pinctrl_usbotg_2, but we want it
  323. @@ -18,6 +87,17 @@
  324. */
  325. fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
  326. };
  327. +
  328. + pinctrl_microsom_usdhc1: microsom-usdhc1 {
  329. + fsl,pins = <
  330. + MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
  331. + MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
  332. + MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
  333. + MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
  334. + MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
  335. + MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
  336. + >;
  337. + };
  338. };
  339. };
  340. @@ -27,7 +107,25 @@
  341. status = "okay";
  342. };
  343. +/* UART4 - Connected to optional BRCM Wifi/BT/FM */
  344. +&uart4 {
  345. + pinctrl-names = "default";
  346. + pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4_1>;
  347. + fsl,uart-has-rtscts;
  348. +};
  349. +
  350. &usbotg {
  351. pinctrl-names = "default";
  352. pinctrl-0 = <&pinctrl_microsom_usbotg>;
  353. };
  354. +
  355. +/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */
  356. +&usdhc1 {
  357. + card-external-vcc-supply = <&reg_brcm>;
  358. + card-reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>, <&gpio6 0 GPIO_ACTIVE_LOW>;
  359. + keep-power-in-suspend;
  360. + non-removable;
  361. + pinctrl-names = "default";
  362. + pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>;
  363. + vmmc-supply = <&reg_brcm>;
  364. +};
  365. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/imx6sl.dtsi linux-3.15-rc1/arch/arm/boot/dts/imx6sl.dtsi
  366. --- linux-3.15-rc1.orig/arch/arm/boot/dts/imx6sl.dtsi 2014-04-13 23:18:35.000000000 +0200
  367. +++ linux-3.15-rc1/arch/arm/boot/dts/imx6sl.dtsi 2014-04-25 14:11:13.515375059 +0200
  368. @@ -111,6 +111,8 @@
  369. cache-level = <2>;
  370. arm,tag-latency = <4 2 3>;
  371. arm,data-latency = <4 2 3>;
  372. + arm,dynamic-clk-gating;
  373. + arm,standby-mode;
  374. };
  375. pmu {
  376. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/marco.dtsi linux-3.15-rc1/arch/arm/boot/dts/marco.dtsi
  377. --- linux-3.15-rc1.orig/arch/arm/boot/dts/marco.dtsi 2014-04-13 23:18:35.000000000 +0200
  378. +++ linux-3.15-rc1/arch/arm/boot/dts/marco.dtsi 2014-04-25 14:11:13.515375059 +0200
  379. @@ -36,7 +36,7 @@
  380. ranges = <0x40000000 0x40000000 0xa0000000>;
  381. l2-cache-controller@c0030000 {
  382. - compatible = "sirf,marco-pl310-cache", "arm,pl310-cache";
  383. + compatible = "arm,pl310-cache";
  384. reg = <0xc0030000 0x1000>;
  385. interrupts = <0 59 0>;
  386. arm,tag-latency = <1 1 1>;
  387. diff -Nur linux-3.15-rc1.orig/arch/arm/boot/dts/prima2.dtsi linux-3.15-rc1/arch/arm/boot/dts/prima2.dtsi
  388. --- linux-3.15-rc1.orig/arch/arm/boot/dts/prima2.dtsi 2014-04-13 23:18:35.000000000 +0200
  389. +++ linux-3.15-rc1/arch/arm/boot/dts/prima2.dtsi 2014-04-25 14:11:13.515375059 +0200
  390. @@ -48,7 +48,7 @@
  391. ranges = <0x40000000 0x40000000 0x80000000>;
  392. l2-cache-controller@80040000 {
  393. - compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache";
  394. + compatible = "arm,pl310-cache";
  395. reg = <0x80040000 0x1000>;
  396. interrupts = <59>;
  397. arm,tag-latency = <1 1 1>;
  398. diff -Nur linux-3.15-rc1.orig/arch/arm/configs/imx_v6_v7_defconfig linux-3.15-rc1/arch/arm/configs/imx_v6_v7_defconfig
  399. --- linux-3.15-rc1.orig/arch/arm/configs/imx_v6_v7_defconfig 2014-04-13 23:18:35.000000000 +0200
  400. +++ linux-3.15-rc1/arch/arm/configs/imx_v6_v7_defconfig 2014-04-25 14:11:13.515375059 +0200
  401. @@ -245,6 +245,7 @@
  402. CONFIG_DRM_IMX_LDB=y
  403. CONFIG_DRM_IMX_IPUV3_CORE=y
  404. CONFIG_DRM_IMX_IPUV3=y
  405. +CONFIG_DRM_IMX_HDMI=y
  406. CONFIG_COMMON_CLK_DEBUG=y
  407. # CONFIG_IOMMU_SUPPORT is not set
  408. CONFIG_PWM=y
  409. diff -Nur linux-3.15-rc1.orig/arch/arm/include/asm/hardware/cache-l2x0.h linux-3.15-rc1/arch/arm/include/asm/hardware/cache-l2x0.h
  410. --- linux-3.15-rc1.orig/arch/arm/include/asm/hardware/cache-l2x0.h 2014-04-13 23:18:35.000000000 +0200
  411. +++ linux-3.15-rc1/arch/arm/include/asm/hardware/cache-l2x0.h 2014-04-25 14:11:13.515375059 +0200
  412. @@ -26,8 +26,8 @@
  413. #define L2X0_CACHE_TYPE 0x004
  414. #define L2X0_CTRL 0x100
  415. #define L2X0_AUX_CTRL 0x104
  416. -#define L2X0_TAG_LATENCY_CTRL 0x108
  417. -#define L2X0_DATA_LATENCY_CTRL 0x10C
  418. +#define L310_TAG_LATENCY_CTRL 0x108
  419. +#define L310_DATA_LATENCY_CTRL 0x10C
  420. #define L2X0_EVENT_CNT_CTRL 0x200
  421. #define L2X0_EVENT_CNT1_CFG 0x204
  422. #define L2X0_EVENT_CNT0_CFG 0x208
  423. @@ -54,53 +54,93 @@
  424. #define L2X0_LOCKDOWN_WAY_D_BASE 0x900
  425. #define L2X0_LOCKDOWN_WAY_I_BASE 0x904
  426. #define L2X0_LOCKDOWN_STRIDE 0x08
  427. -#define L2X0_ADDR_FILTER_START 0xC00
  428. -#define L2X0_ADDR_FILTER_END 0xC04
  429. +#define L310_ADDR_FILTER_START 0xC00
  430. +#define L310_ADDR_FILTER_END 0xC04
  431. #define L2X0_TEST_OPERATION 0xF00
  432. #define L2X0_LINE_DATA 0xF10
  433. #define L2X0_LINE_TAG 0xF30
  434. #define L2X0_DEBUG_CTRL 0xF40
  435. -#define L2X0_PREFETCH_CTRL 0xF60
  436. -#define L2X0_POWER_CTRL 0xF80
  437. -#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
  438. -#define L2X0_STNDBY_MODE_EN (1 << 0)
  439. +#define L310_PREFETCH_CTRL 0xF60
  440. +#define L310_POWER_CTRL 0xF80
  441. +#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
  442. +#define L310_STNDBY_MODE_EN (1 << 0)
  443. /* Registers shifts and masks */
  444. #define L2X0_CACHE_ID_PART_MASK (0xf << 6)
  445. #define L2X0_CACHE_ID_PART_L210 (1 << 6)
  446. +#define L2X0_CACHE_ID_PART_L220 (2 << 6)
  447. #define L2X0_CACHE_ID_PART_L310 (3 << 6)
  448. #define L2X0_CACHE_ID_RTL_MASK 0x3f
  449. -#define L2X0_CACHE_ID_RTL_R0P0 0x0
  450. -#define L2X0_CACHE_ID_RTL_R1P0 0x2
  451. -#define L2X0_CACHE_ID_RTL_R2P0 0x4
  452. -#define L2X0_CACHE_ID_RTL_R3P0 0x5
  453. -#define L2X0_CACHE_ID_RTL_R3P1 0x6
  454. -#define L2X0_CACHE_ID_RTL_R3P2 0x8
  455. -
  456. -#define L2X0_AUX_CTRL_MASK 0xc0000fff
  457. +#define L210_CACHE_ID_RTL_R0P2_02 0x00
  458. +#define L210_CACHE_ID_RTL_R0P1 0x01
  459. +#define L210_CACHE_ID_RTL_R0P2_01 0x02
  460. +#define L210_CACHE_ID_RTL_R0P3 0x03
  461. +#define L210_CACHE_ID_RTL_R0P4 0x0b
  462. +#define L210_CACHE_ID_RTL_R0P5 0x0f
  463. +#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
  464. +#define L310_CACHE_ID_RTL_R0P0 0x00
  465. +#define L310_CACHE_ID_RTL_R1P0 0x02
  466. +#define L310_CACHE_ID_RTL_R2P0 0x04
  467. +#define L310_CACHE_ID_RTL_R3P0 0x05
  468. +#define L310_CACHE_ID_RTL_R3P1 0x06
  469. +#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
  470. +#define L310_CACHE_ID_RTL_R3P2 0x08
  471. +#define L310_CACHE_ID_RTL_R3P3 0x09
  472. +
  473. +/* L2C auxiliary control register - bits common to L2C-210/220/310 */
  474. +#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
  475. +#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
  476. +#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
  477. +#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
  478. +#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
  479. +#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
  480. +/* L2C-210/220 common bits */
  481. #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
  482. -#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
  483. +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
  484. #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
  485. -#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
  486. +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
  487. #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
  488. -#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
  489. +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
  490. #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
  491. -#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
  492. -#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
  493. -#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
  494. -#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
  495. -#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
  496. -#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
  497. -#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
  498. -#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
  499. -#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
  500. -#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
  501. -
  502. -#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
  503. -#define L2X0_LATENCY_CTRL_RD_SHIFT 4
  504. -#define L2X0_LATENCY_CTRL_WR_SHIFT 8
  505. -
  506. -#define L2X0_ADDR_FILTER_EN 1
  507. +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
  508. +#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
  509. +#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
  510. +/* L2C-210 specific bits */
  511. +#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
  512. +#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
  513. +#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
  514. +/* L2C-220 specific bits */
  515. +#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
  516. +#define L220_AUX_CTRL_FWA_SHIFT 23
  517. +#define L220_AUX_CTRL_FWA_MASK (3 << 23)
  518. +#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
  519. +#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
  520. +/* L2C-310 specific bits */
  521. +#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
  522. +#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
  523. +#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
  524. +#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
  525. +#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
  526. +#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
  527. +#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
  528. +#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
  529. +#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
  530. +#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
  531. +#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
  532. +
  533. +#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
  534. +#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
  535. +#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
  536. +
  537. +#define L310_ADDR_FILTER_EN 1
  538. +
  539. +#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
  540. +#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
  541. +#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
  542. +#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
  543. +#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
  544. +#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
  545. +#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
  546. #define L2X0_CTRL_EN 1
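
The header now splits the definitions into common L2C, L2C-210/220 and L2C-310 groups and switches the single-bit controls to BIT(). A couple of the new values can be sanity-checked with a standalone program; the cycles-minus-one encoding of the latency fields is assumed from the cache-l2x0 DT parsing code rather than stated in this hunk:

    /* Standalone check of a few of the new definitions above (BIT()
     * expanded by hand so this builds outside the kernel).
     * Build: cc -o l2c_bits l2c_bits.c && ./l2c_bits */
    #include <stdio.h>

    #define BIT(n)                          (1u << (n))
    #define L2C_AUX_CTRL_SHARED_OVERRIDE    BIT(22)
    #define L310_AUX_CTRL_DATA_PREFETCH     BIT(28)
    #define L310_AUX_CTRL_INSTR_PREFETCH    BIT(29)

    #define L310_LATENCY_CTRL_SETUP(n)      ((n) << 0)
    #define L310_LATENCY_CTRL_RD(n)         ((n) << 4)
    #define L310_LATENCY_CTRL_WR(n)         ((n) << 8)

    int main(void)
    {
        /* Aux value the OMAP4 hunk later in this patch feeds l2x0_of_init() */
        unsigned aux = L2C_AUX_CTRL_SHARED_OVERRIDE |
                       L310_AUX_CTRL_DATA_PREFETCH |
                       L310_AUX_CTRL_INSTR_PREFETCH;

        /* Latency register for arm,tag-latency = <4 2 3> (read write setup),
         * assuming the driver writes cycles-1 into each 3-bit field */
        unsigned tag = L310_LATENCY_CTRL_RD(4 - 1) |
                       L310_LATENCY_CTRL_WR(2 - 1) |
                       L310_LATENCY_CTRL_SETUP(3 - 1);

        printf("aux ctrl    = 0x%08x\n", aux);  /* 0x30400000 */
        printf("tag latency = 0x%08x\n", tag);  /* 0x00000132 */
        return 0;
    }
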
  547. diff -Nur linux-3.15-rc1.orig/arch/arm/include/asm/outercache.h linux-3.15-rc1/arch/arm/include/asm/outercache.h
  548. --- linux-3.15-rc1.orig/arch/arm/include/asm/outercache.h 2014-04-13 23:18:35.000000000 +0200
  549. +++ linux-3.15-rc1/arch/arm/include/asm/outercache.h 2014-04-25 14:11:13.515375059 +0200
  550. @@ -21,6 +21,7 @@
  551. #ifndef __ASM_OUTERCACHE_H
  552. #define __ASM_OUTERCACHE_H
  553. +#include <linux/bug.h>
  554. #include <linux/types.h>
  555. struct outer_cache_fns {
  556. @@ -28,53 +29,84 @@
  557. void (*clean_range)(unsigned long, unsigned long);
  558. void (*flush_range)(unsigned long, unsigned long);
  559. void (*flush_all)(void);
  560. - void (*inv_all)(void);
  561. void (*disable)(void);
  562. #ifdef CONFIG_OUTER_CACHE_SYNC
  563. void (*sync)(void);
  564. #endif
  565. - void (*set_debug)(unsigned long);
  566. void (*resume)(void);
  567. +
  568. + /* This is an ARM L2C thing */
  569. + void (*write_sec)(unsigned long, unsigned);
  570. };
  571. extern struct outer_cache_fns outer_cache;
  572. #ifdef CONFIG_OUTER_CACHE
  573. -
  574. +/**
  575. + * outer_inv_range - invalidate range of outer cache lines
  576. + * @start: starting physical address, inclusive
  577. + * @end: end physical address, exclusive
  578. + */
  579. static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
  580. {
  581. if (outer_cache.inv_range)
  582. outer_cache.inv_range(start, end);
  583. }
  584. +
  585. +/**
  586. + * outer_clean_range - clean dirty outer cache lines
  587. + * @start: starting physical address, inclusive
  588. + * @end: end physical address, exclusive
  589. + */
  590. static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
  591. {
  592. if (outer_cache.clean_range)
  593. outer_cache.clean_range(start, end);
  594. }
  595. +
  596. +/**
  597. + * outer_flush_range - clean and invalidate outer cache lines
  598. + * @start: starting physical address, inclusive
  599. + * @end: end physical address, exclusive
  600. + */
  601. static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
  602. {
  603. if (outer_cache.flush_range)
  604. outer_cache.flush_range(start, end);
  605. }
  606. +/**
  607. + * outer_flush_all - clean and invalidate all cache lines in the outer cache
  608. + *
  609. + * Note: depending on implementation, this may not be atomic - it must
  610. + * only be called with interrupts disabled and no other active outer
  611. + * cache masters.
  612. + *
  613. + * It is intended that this function is only used by implementations
  614. + * needing to override the outer_cache.disable() method due to security.
  615. + * (Some implementations perform this as a clean followed by an invalidate.)
  616. + */
  617. static inline void outer_flush_all(void)
  618. {
  619. if (outer_cache.flush_all)
  620. outer_cache.flush_all();
  621. }
  622. -static inline void outer_inv_all(void)
  623. -{
  624. - if (outer_cache.inv_all)
  625. - outer_cache.inv_all();
  626. -}
  627. -
  628. -static inline void outer_disable(void)
  629. -{
  630. - if (outer_cache.disable)
  631. - outer_cache.disable();
  632. -}
  633. -
  634. +/**
  635. + * outer_disable - clean, invalidate and disable the outer cache
  636. + *
  637. + * Disable the outer cache, ensuring that any data contained in the outer
  638. + * cache is pushed out to lower levels of system memory. The note and
  639. + * conditions above concerning outer_flush_all() applies here.
  640. + */
  641. +extern void outer_disable(void);
  642. +
  643. +/**
  644. + * outer_resume - restore the cache configuration and re-enable outer cache
  645. + *
  646. + * Restore any configuration that the cache had when previously enabled,
  647. + * and re-enable the outer cache.
  648. + */
  649. static inline void outer_resume(void)
  650. {
  651. if (outer_cache.resume)
  652. @@ -90,13 +122,18 @@
  653. static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
  654. { }
  655. static inline void outer_flush_all(void) { }
  656. -static inline void outer_inv_all(void) { }
  657. static inline void outer_disable(void) { }
  658. static inline void outer_resume(void) { }
  659. #endif
  660. #ifdef CONFIG_OUTER_CACHE_SYNC
  661. +/**
  662. + * outer_sync - perform a sync point for outer cache
  663. + *
  664. + * Ensure that all outer cache operations are complete and any store
  665. + * buffers are drained.
  666. + */
  667. static inline void outer_sync(void)
  668. {
  669. if (outer_cache.sync)
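
outer_inv_all() and set_debug() drop out of the function table and outer_disable() becomes a real function, while platforms whose secure firmware owns the L2C registers install a single write_sec hook instead, as the Highbank and OMAP4 hunks later in the patch do. A minimal sketch of that pattern, with a made-up firmware call:

    /* Sketch of the new write_sec hook for a platform whose secure
     * monitor owns the L2C-310 registers.  my_soc_smc() is a placeholder
     * for the platform's real firmware entry point. */
    #include <linux/bug.h>
    #include <linux/init.h>
    #include <asm/hardware/cache-l2x0.h>
    #include <asm/outercache.h>

    extern void my_soc_smc(unsigned long reg, unsigned long val);  /* hypothetical */

    static void my_soc_l2c310_write_sec(unsigned long val, unsigned reg)
    {
        switch (reg) {
        case L2X0_CTRL:
        case L2X0_AUX_CTRL:
        case L310_PREFETCH_CTRL:
            my_soc_smc(reg, val);       /* forward to secure firmware */
            break;
        default:
            WARN_ONCE(1, "L2C310: ignoring write to reg 0x%x\n", reg);
        }
    }

    static void __init my_soc_init_l2(void)
    {
        outer_cache.write_sec = my_soc_l2c310_write_sec;
        l2x0_of_init(0, ~0);
    }
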
  670. diff -Nur linux-3.15-rc1.orig/arch/arm/Kconfig linux-3.15-rc1/arch/arm/Kconfig
  671. --- linux-3.15-rc1.orig/arch/arm/Kconfig 2014-04-13 23:18:35.000000000 +0200
  672. +++ linux-3.15-rc1/arch/arm/Kconfig 2014-04-25 14:11:13.515375059 +0200
  673. @@ -1229,19 +1229,6 @@
  674. register of the Cortex-A9 which reduces the linefill issuing
  675. capabilities of the processor.
  676. -config PL310_ERRATA_588369
  677. - bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
  678. - depends on CACHE_L2X0
  679. - help
  680. - The PL310 L2 cache controller implements three types of Clean &
  681. - Invalidate maintenance operations: by Physical Address
  682. - (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
  683. - They are architecturally defined to behave as the execution of a
  684. - clean operation followed immediately by an invalidate operation,
  685. - both performing to the same memory location. This functionality
  686. - is not correctly implemented in PL310 as clean lines are not
  687. - invalidated as a result of these operations.
  688. -
  689. config ARM_ERRATA_643719
  690. bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
  691. depends on CPU_V7 && SMP
  692. @@ -1264,17 +1251,6 @@
  693. tables. The workaround changes the TLB flushing routines to invalidate
  694. entries regardless of the ASID.
  695. -config PL310_ERRATA_727915
  696. - bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
  697. - depends on CACHE_L2X0
  698. - help
  699. - PL310 implements the Clean & Invalidate by Way L2 cache maintenance
  700. - operation (offset 0x7FC). This operation runs in background so that
  701. - PL310 can handle normal accesses while it is in progress. Under very
  702. - rare circumstances, due to this erratum, write data can be lost when
  703. - PL310 treats a cacheable write transaction during a Clean &
  704. - Invalidate by Way operation.
  705. -
  706. config ARM_ERRATA_743622
  707. bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
  708. depends on CPU_V7
  709. @@ -1300,21 +1276,6 @@
  710. operation is received by a CPU before the ICIALLUIS has completed,
  711. potentially leading to corrupted entries in the cache or TLB.
  712. -config PL310_ERRATA_753970
  713. - bool "PL310 errata: cache sync operation may be faulty"
  714. - depends on CACHE_PL310
  715. - help
  716. - This option enables the workaround for the 753970 PL310 (r3p0) erratum.
  717. -
  718. - Under some condition the effect of cache sync operation on
  719. - the store buffer still remains when the operation completes.
  720. - This means that the store buffer is always asked to drain and
  721. - this prevents it from merging any further writes. The workaround
  722. - is to replace the normal offset of cache sync operation (0x730)
  723. - by another offset targeting an unmapped PL310 register 0x740.
  724. - This has the same effect as the cache sync operation: store buffer
  725. - drain and waiting for all buffers empty.
  726. -
  727. config ARM_ERRATA_754322
  728. bool "ARM errata: possible faulty MMU translations following an ASID switch"
  729. depends on CPU_V7
  730. @@ -1363,18 +1324,6 @@
  731. relevant cache maintenance functions and sets a specific bit
  732. in the diagnostic control register of the SCU.
  733. -config PL310_ERRATA_769419
  734. - bool "PL310 errata: no automatic Store Buffer drain"
  735. - depends on CACHE_L2X0
  736. - help
  737. - On revisions of the PL310 prior to r3p2, the Store Buffer does
  738. - not automatically drain. This can cause normal, non-cacheable
  739. - writes to be retained when the memory system is idle, leading
  740. - to suboptimal I/O performance for drivers using coherent DMA.
  741. - This option adds a write barrier to the cpu_idle loop so that,
  742. - on systems with an outer cache, the store buffer is drained
  743. - explicitly.
  744. -
  745. config ARM_ERRATA_775420
  746. bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
  747. depends on CPU_V7
  748. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-berlin/berlin.c linux-3.15-rc1/arch/arm/mach-berlin/berlin.c
  749. --- linux-3.15-rc1.orig/arch/arm/mach-berlin/berlin.c 2014-04-13 23:18:35.000000000 +0200
  750. +++ linux-3.15-rc1/arch/arm/mach-berlin/berlin.c 2014-04-25 14:11:13.515375059 +0200
  751. @@ -24,7 +24,7 @@
  752. * with DT probing for L2CCs, berlin_init_machine can be removed.
  753. * Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
  754. */
  755. - l2x0_of_init(0x70c00000, 0xfeffffff);
  756. + l2x0_of_init(0x30c00000, 0xfeffffff);
  757. of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  758. }
  759. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-cns3xxx/core.c linux-3.15-rc1/arch/arm/mach-cns3xxx/core.c
  760. --- linux-3.15-rc1.orig/arch/arm/mach-cns3xxx/core.c 2014-04-13 23:18:35.000000000 +0200
  761. +++ linux-3.15-rc1/arch/arm/mach-cns3xxx/core.c 2014-04-25 14:11:13.515375059 +0200
  762. @@ -272,9 +272,9 @@
  763. *
  764. * 1 cycle of latency for setup, read and write accesses
  765. */
  766. - val = readl(base + L2X0_TAG_LATENCY_CTRL);
  767. + val = readl(base + L310_TAG_LATENCY_CTRL);
  768. val &= 0xfffff888;
  769. - writel(val, base + L2X0_TAG_LATENCY_CTRL);
  770. + writel(val, base + L310_TAG_LATENCY_CTRL);
  771. /*
  772. * Data RAM Control register
  773. @@ -285,12 +285,12 @@
  774. *
  775. * 1 cycle of latency for setup, read and write accesses
  776. */
  777. - val = readl(base + L2X0_DATA_LATENCY_CTRL);
  778. + val = readl(base + L310_DATA_LATENCY_CTRL);
  779. val &= 0xfffff888;
  780. - writel(val, base + L2X0_DATA_LATENCY_CTRL);
  781. + writel(val, base + L310_DATA_LATENCY_CTRL);
  782. /* 32 KiB, 8-way, parity disable */
  783. - l2x0_init(base, 0x00540000, 0xfe000fff);
  784. + l2x0_init(base, 0x00500000, 0xfe0f0fff);
  785. }
  786. #endif /* CONFIG_CACHE_L2X0 */
  787. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-exynos/common.h linux-3.15-rc1/arch/arm/mach-exynos/common.h
  788. --- linux-3.15-rc1.orig/arch/arm/mach-exynos/common.h 2014-04-13 23:18:35.000000000 +0200
  789. +++ linux-3.15-rc1/arch/arm/mach-exynos/common.h 2014-04-25 14:11:13.515375059 +0200
  790. @@ -55,7 +55,6 @@
  791. NUM_SYS_POWERDOWN,
  792. };
  793. -extern unsigned long l2x0_regs_phys;
  794. struct exynos_pmu_conf {
  795. void __iomem *reg;
  796. unsigned int val[NUM_SYS_POWERDOWN];
  797. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-exynos/exynos.c linux-3.15-rc1/arch/arm/mach-exynos/exynos.c
  798. --- linux-3.15-rc1.orig/arch/arm/mach-exynos/exynos.c 2014-04-13 23:18:35.000000000 +0200
  799. +++ linux-3.15-rc1/arch/arm/mach-exynos/exynos.c 2014-04-25 14:11:13.515375059 +0200
  800. @@ -32,9 +32,6 @@
  801. #include "mfc.h"
  802. #include "regs-pmu.h"
  803. -#define L2_AUX_VAL 0x7C470001
  804. -#define L2_AUX_MASK 0xC200ffff
  805. -
  806. static struct map_desc exynos4_iodesc[] __initdata = {
  807. {
  808. .virtual = (unsigned long)S3C_VA_SYS,
  809. @@ -321,17 +318,7 @@
  810. static int __init exynos4_l2x0_cache_init(void)
  811. {
  812. - int ret;
  813. -
  814. - ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
  815. - if (ret)
  816. - return ret;
  817. -
  818. - if (IS_ENABLED(CONFIG_S5P_SLEEP)) {
  819. - l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
  820. - clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
  821. - }
  822. - return 0;
  823. + return l2x0_of_init(0x3c400001, 0xc20fffff);
  824. }
  825. early_initcall(exynos4_l2x0_cache_init);
  826. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-exynos/sleep.S linux-3.15-rc1/arch/arm/mach-exynos/sleep.S
  827. --- linux-3.15-rc1.orig/arch/arm/mach-exynos/sleep.S 2014-04-13 23:18:35.000000000 +0200
  828. +++ linux-3.15-rc1/arch/arm/mach-exynos/sleep.S 2014-04-25 14:11:13.519375076 +0200
  829. @@ -16,8 +16,6 @@
  830. */
  831. #include <linux/linkage.h>
  832. -#include <asm/asm-offsets.h>
  833. -#include <asm/hardware/cache-l2x0.h>
  834. #define CPU_MASK 0xff0ffff0
  835. #define CPU_CORTEX_A9 0x410fc090
  836. @@ -53,33 +51,7 @@
  837. and r0, r0, r1
  838. ldr r1, =CPU_CORTEX_A9
  839. cmp r0, r1
  840. - bne skip_l2_resume
  841. - adr r0, l2x0_regs_phys
  842. - ldr r0, [r0]
  843. - cmp r0, #0
  844. - beq skip_l2_resume
  845. - ldr r1, [r0, #L2X0_R_PHY_BASE]
  846. - ldr r2, [r1, #L2X0_CTRL]
  847. - tst r2, #0x1
  848. - bne skip_l2_resume
  849. - ldr r2, [r0, #L2X0_R_AUX_CTRL]
  850. - str r2, [r1, #L2X0_AUX_CTRL]
  851. - ldr r2, [r0, #L2X0_R_TAG_LATENCY]
  852. - str r2, [r1, #L2X0_TAG_LATENCY_CTRL]
  853. - ldr r2, [r0, #L2X0_R_DATA_LATENCY]
  854. - str r2, [r1, #L2X0_DATA_LATENCY_CTRL]
  855. - ldr r2, [r0, #L2X0_R_PREFETCH_CTRL]
  856. - str r2, [r1, #L2X0_PREFETCH_CTRL]
  857. - ldr r2, [r0, #L2X0_R_PWR_CTRL]
  858. - str r2, [r1, #L2X0_POWER_CTRL]
  859. - mov r2, #1
  860. - str r2, [r1, #L2X0_CTRL]
  861. -skip_l2_resume:
  862. + bleq l2c310_early_resume
  863. #endif
  864. b cpu_resume
  865. ENDPROC(exynos_cpu_resume)
  866. -#ifdef CONFIG_CACHE_L2X0
  867. - .globl l2x0_regs_phys
  868. -l2x0_regs_phys:
  869. - .long 0
  870. -#endif
  871. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-highbank/highbank.c linux-3.15-rc1/arch/arm/mach-highbank/highbank.c
  872. --- linux-3.15-rc1.orig/arch/arm/mach-highbank/highbank.c 2014-04-13 23:18:35.000000000 +0200
  873. +++ linux-3.15-rc1/arch/arm/mach-highbank/highbank.c 2014-04-25 14:11:13.519375076 +0200
  874. @@ -51,11 +51,13 @@
  875. }
  876. -static void highbank_l2x0_disable(void)
  877. +static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
  878. {
  879. - outer_flush_all();
  880. - /* Disable PL310 L2 Cache controller */
  881. - highbank_smc1(0x102, 0x0);
  882. + if (reg == L2X0_CTRL)
  883. + highbank_smc1(0x102, val);
  884. + else
  885. + WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
  886. + reg);
  887. }
  888. static void __init highbank_init_irq(void)
  889. @@ -66,11 +68,9 @@
  890. highbank_scu_map_io();
  891. /* Enable PL310 L2 Cache controller */
  892. - if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
  893. - of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
  894. - highbank_smc1(0x102, 0x1);
  895. - l2x0_of_init(0, ~0UL);
  896. - outer_cache.disable = highbank_l2x0_disable;
  897. + if (IS_ENABLED(CONFIG_CACHE_L2X0)) {
  898. + outer_cache.write_sec = highbank_l2c310_write_sec;
  899. + l2x0_of_init(0, ~0);
  900. }
  901. }
  902. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-imx/clk-imx6q.c linux-3.15-rc1/arch/arm/mach-imx/clk-imx6q.c
  903. --- linux-3.15-rc1.orig/arch/arm/mach-imx/clk-imx6q.c 2014-04-13 23:18:35.000000000 +0200
  904. +++ linux-3.15-rc1/arch/arm/mach-imx/clk-imx6q.c 2014-04-25 14:11:13.519375076 +0200
  905. @@ -258,14 +258,14 @@
  906. clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
  907. clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
  908. clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
  909. - clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
  910. - clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
  911. - clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
  912. - clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
  913. - clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels));
  914. - clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels));
  915. - clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels));
  916. - clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels));
  917. + clk[ipu1_di0_pre_sel] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
  918. + clk[ipu1_di1_pre_sel] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
  919. + clk[ipu2_di0_pre_sel] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
  920. + clk[ipu2_di1_pre_sel] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
  921. + clk[ipu1_di0_sel] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
  922. + clk[ipu1_di1_sel] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
  923. + clk[ipu2_di0_sel] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
  924. + clk[ipu2_di1_sel] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
  925. clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
  926. clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
  927. clk[ssi1_sel] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
  928. @@ -445,6 +445,19 @@
  929. clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]);
  930. }
  931. + if (cpu_is_imx6dl()) {
  932. + clk_set_parent(clk[ipu1_sel], clk[pll3_pfd1_540m]);
  933. + }
  934. +
  935. + clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
  936. + clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
  937. + clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
  938. + clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
  939. + clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
  940. + clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
  941. + clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
  942. + clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
  943. +
  944. /*
  945. * The gpmi needs 100MHz frequency in the EDO/Sync mode,
  946. * We can not get the 100MHz from the pll2_pfd0_352m.
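
The clk-imx6q change both reparents the IPU DI pre-dividers to pll5_video_div and adds CLK_SET_RATE_PARENT to the DI muxes, so a pixel-clock request can propagate up and re-rate PLL5 instead of being rounded by fixed dividers. From the consumer side that is roughly the following; the "di0" lookup name is a placeholder, not necessarily what imx-drm uses:

    /* Illustrative consumer-side effect of the CLK_SET_RATE_PARENT change:
     * the rate request walks up the di0 mux/divider chain to PLL5. */
    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int set_pixel_clock(struct device *dev, unsigned long px_hz)
    {
        struct clk *di = devm_clk_get(dev, "di0");  /* placeholder name */

        if (IS_ERR(di))
            return PTR_ERR(di);
        return clk_set_rate(di, px_hz);     /* e.g. 148500000 for 1080p60 */
    }
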
  947. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-imx/clk-pllv3.c linux-3.15-rc1/arch/arm/mach-imx/clk-pllv3.c
  948. --- linux-3.15-rc1.orig/arch/arm/mach-imx/clk-pllv3.c 2014-04-13 23:18:35.000000000 +0200
  949. +++ linux-3.15-rc1/arch/arm/mach-imx/clk-pllv3.c 2014-04-25 14:11:13.519375076 +0200
  950. @@ -273,9 +273,10 @@
  951. struct clk_pllv3 *pll = to_clk_pllv3(hw);
  952. unsigned long min_rate = parent_rate * 27;
  953. unsigned long max_rate = parent_rate * 54;
  954. - u32 val, div;
  955. + u32 val, newval, div;
  956. u32 mfn, mfd = 1000000;
  957. s64 temp64;
  958. + int ret;
  959. if (rate < min_rate || rate > max_rate)
  960. return -EINVAL;
  961. @@ -287,13 +288,27 @@
  962. mfn = temp64;
  963. val = readl_relaxed(pll->base);
  964. - val &= ~pll->div_mask;
  965. - val |= div;
  966. - writel_relaxed(val, pll->base);
  967. +
  968. + /* set the PLL into bypass mode */
  969. + newval = val | BM_PLL_BYPASS;
  970. + writel_relaxed(newval, pll->base);
  971. +
  972. + /* configure the new frequency */
  973. + newval &= ~pll->div_mask;
  974. + newval |= div;
  975. + writel_relaxed(newval, pll->base);
  976. writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
  977. - writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
  978. + writel(mfd, pll->base + PLL_DENOM_OFFSET);
  979. +
  980. + ret = clk_pllv3_wait_lock(pll);
  981. + if (ret == 0 && val & BM_PLL_POWER) {
  982. + /* only if it locked can we switch back to the PLL */
  983. + newval &= ~BM_PLL_BYPASS;
  984. + newval |= val & BM_PLL_BYPASS;
  985. + writel(newval, pll->base);
  986. + }
  987. - return clk_pllv3_wait_lock(pll);
  988. + return ret;
  989. }
  990. static const struct clk_ops clk_pllv3_av_ops = {
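
The reworked set_rate path parks the PLL in bypass while the divider and MFN/MFD are rewritten, waits for lock, and only clears bypass again if the PLL was powered (a pre-existing bypass setting is preserved). The ordering, distilled against a fake register so it compiles standalone - the bit positions below are placeholders, not the real BM_PLL_* values:

    /* Ordering of the new set_rate flow, run against a fake register.
     * Build: cc -o pll_seq pll_seq.c && ./pll_seq */
    #include <stdio.h>

    #define FAKE_BYPASS     (1u << 16)
    #define FAKE_POWER      (1u << 12)
    #define FAKE_DIV_MASK   0x7fu

    static unsigned pll_reg = FAKE_POWER | 0x2a;    /* powered, old divider */

    static int wait_lock(void) { return 0; }        /* pretend the PLL locks */

    int main(void)
    {
        unsigned val = pll_reg, newval, div = 0x30;

        newval = val | FAKE_BYPASS;                 /* 1: bypass first */
        pll_reg = newval;

        newval = (newval & ~FAKE_DIV_MASK) | div;   /* 2: program new divider */
        pll_reg = newval;

        if (wait_lock() == 0 && (val & FAKE_POWER)) {   /* 3: locked and powered */
            newval &= ~FAKE_BYPASS;
            newval |= val & FAKE_BYPASS;    /* keep a pre-existing bypass */
            pll_reg = newval;
        }
        printf("final reg = 0x%08x\n", pll_reg);    /* prints 00001030 */
        return 0;
    }
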
  991. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-imx/mach-vf610.c linux-3.15-rc1/arch/arm/mach-imx/mach-vf610.c
  992. --- linux-3.15-rc1.orig/arch/arm/mach-imx/mach-vf610.c 2014-04-13 23:18:35.000000000 +0200
  993. +++ linux-3.15-rc1/arch/arm/mach-imx/mach-vf610.c 2014-04-25 14:11:13.519375076 +0200
  994. @@ -22,7 +22,7 @@
  995. static void __init vf610_init_irq(void)
  996. {
  997. - l2x0_of_init(0, ~0UL);
  998. + l2x0_of_init(0, ~0);
  999. irqchip_init();
  1000. }
  1001. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-imx/suspend-imx6.S linux-3.15-rc1/arch/arm/mach-imx/suspend-imx6.S
  1002. --- linux-3.15-rc1.orig/arch/arm/mach-imx/suspend-imx6.S 2014-04-13 23:18:35.000000000 +0200
  1003. +++ linux-3.15-rc1/arch/arm/mach-imx/suspend-imx6.S 2014-04-25 14:11:13.519375076 +0200
  1004. @@ -334,28 +334,10 @@
  1005. * turned into relative ones.
  1006. */
  1007. -#ifdef CONFIG_CACHE_L2X0
  1008. - .macro pl310_resume
  1009. - adr r0, l2x0_saved_regs_offset
  1010. - ldr r2, [r0]
  1011. - add r2, r2, r0
  1012. - ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
  1013. - ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
  1014. - str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
  1015. - mov r1, #0x1
  1016. - str r1, [r0, #L2X0_CTRL] @ re-enable L2
  1017. - .endm
  1018. -
  1019. -l2x0_saved_regs_offset:
  1020. - .word l2x0_saved_regs - .
  1021. -
  1022. -#else
  1023. - .macro pl310_resume
  1024. - .endm
  1025. -#endif
  1026. -
  1027. ENTRY(v7_cpu_resume)
  1028. bl v7_invalidate_l1
  1029. - pl310_resume
  1030. +#ifdef CONFIG_CACHE_L2X0
  1031. + bl l2c310_early_resume
  1032. +#endif
  1033. b cpu_resume
  1034. ENDPROC(v7_cpu_resume)
  1035. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-imx/system.c linux-3.15-rc1/arch/arm/mach-imx/system.c
  1036. --- linux-3.15-rc1.orig/arch/arm/mach-imx/system.c 2014-04-13 23:18:35.000000000 +0200
  1037. +++ linux-3.15-rc1/arch/arm/mach-imx/system.c 2014-04-25 14:11:13.523375094 +0200
  1038. @@ -124,7 +124,7 @@
  1039. }
  1040. /* Configure the L2 PREFETCH and POWER registers */
  1041. - val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
  1042. + val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
  1043. val |= 0x70800000;
  1044. /*
  1045. * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
  1046. @@ -137,14 +137,12 @@
  1047. */
  1048. if (cpu_is_imx6q())
  1049. val &= ~(1 << 30 | 1 << 23);
  1050. - writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
  1051. - val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
  1052. - writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
  1053. + writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
  1054. iounmap(l2x0_base);
  1055. of_node_put(np);
  1056. out:
  1057. - l2x0_of_init(0, ~0UL);
  1058. + l2x0_of_init(0, ~0);
  1059. }
  1060. #endif
  1061. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-mvebu/board-v7.c linux-3.15-rc1/arch/arm/mach-mvebu/board-v7.c
  1062. --- linux-3.15-rc1.orig/arch/arm/mach-mvebu/board-v7.c 2014-04-13 23:18:35.000000000 +0200
  1063. +++ linux-3.15-rc1/arch/arm/mach-mvebu/board-v7.c 2014-04-25 14:11:13.523375094 +0200
  1064. @@ -60,7 +60,7 @@
  1065. coherency_init();
  1066. BUG_ON(mvebu_mbus_dt_init());
  1067. #ifdef CONFIG_CACHE_L2X0
  1068. - l2x0_of_init(0, ~0UL);
  1069. + l2x0_of_init(0, ~0);
  1070. #endif
  1071. if (of_machine_is_compatible("marvell,armada375"))
  1072. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-nomadik/cpu-8815.c linux-3.15-rc1/arch/arm/mach-nomadik/cpu-8815.c
  1073. --- linux-3.15-rc1.orig/arch/arm/mach-nomadik/cpu-8815.c 2014-04-13 23:18:35.000000000 +0200
  1074. +++ linux-3.15-rc1/arch/arm/mach-nomadik/cpu-8815.c 2014-04-25 14:11:13.523375094 +0200
  1075. @@ -147,7 +147,7 @@
  1076. {
  1077. #ifdef CONFIG_CACHE_L2X0
  1078. /* At full speed latency must be >=2, so 0x249 in low bits */
  1079. - l2x0_of_init(0x00730249, 0xfe000fff);
  1080. + l2x0_of_init(0x00700249, 0xfe0fefff);
  1081. #endif
  1082. of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  1083. }
  1084. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-omap2/common.h linux-3.15-rc1/arch/arm/mach-omap2/common.h
  1085. --- linux-3.15-rc1.orig/arch/arm/mach-omap2/common.h 2014-04-13 23:18:35.000000000 +0200
  1086. +++ linux-3.15-rc1/arch/arm/mach-omap2/common.h 2014-04-25 14:11:13.523375094 +0200
  1087. @@ -91,6 +91,7 @@
  1088. extern void omap3_secure_sync32k_timer_init(void);
  1089. extern void omap3_gptimer_timer_init(void);
  1090. extern void omap4_local_timer_init(void);
  1091. +int omap_l2_cache_init(void);
  1092. extern void omap5_realtime_timer_init(void);
  1093. void omap2420_init_early(void);
  1094. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-omap2/io.c linux-3.15-rc1/arch/arm/mach-omap2/io.c
  1095. --- linux-3.15-rc1.orig/arch/arm/mach-omap2/io.c 2014-04-13 23:18:35.000000000 +0200
  1096. +++ linux-3.15-rc1/arch/arm/mach-omap2/io.c 2014-04-25 14:11:13.523375094 +0200
  1097. @@ -609,6 +609,7 @@
  1098. am43xx_clockdomains_init();
  1099. am43xx_hwmod_init();
  1100. omap_hwmod_init_postsetup();
  1101. + omap_l2_cache_init();
  1102. omap_clk_soc_init = am43xx_dt_clk_init;
  1103. }
  1104. @@ -640,6 +641,7 @@
  1105. omap44xx_clockdomains_init();
  1106. omap44xx_hwmod_init();
  1107. omap_hwmod_init_postsetup();
  1108. + omap_l2_cache_init();
  1109. omap_clk_soc_init = omap4xxx_dt_clk_init;
  1110. }
  1111. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-omap2/Kconfig linux-3.15-rc1/arch/arm/mach-omap2/Kconfig
  1112. --- linux-3.15-rc1.orig/arch/arm/mach-omap2/Kconfig 2014-04-13 23:18:35.000000000 +0200
  1113. +++ linux-3.15-rc1/arch/arm/mach-omap2/Kconfig 2014-04-25 14:11:13.523375094 +0200
  1114. @@ -65,6 +65,7 @@
  1115. select ARCH_HAS_OPP
  1116. select ARM_GIC
  1117. select MACH_OMAP_GENERIC
  1118. + select MIGHT_HAVE_CACHE_L2X0
  1119. config SOC_DRA7XX
  1120. bool "TI DRA7XX"
  1121. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-omap2/omap4-common.c linux-3.15-rc1/arch/arm/mach-omap2/omap4-common.c
  1122. --- linux-3.15-rc1.orig/arch/arm/mach-omap2/omap4-common.c 2014-04-13 23:18:35.000000000 +0200
  1123. +++ linux-3.15-rc1/arch/arm/mach-omap2/omap4-common.c 2014-04-25 14:11:13.523375094 +0200
  1124. @@ -167,75 +167,57 @@
  1125. return l2cache_base;
  1126. }
  1127. -static void omap4_l2x0_disable(void)
  1128. +static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
  1129. {
  1130. - outer_flush_all();
  1131. - /* Disable PL310 L2 Cache controller */
  1132. - omap_smc1(0x102, 0x0);
  1133. -}
  1134. + unsigned smc_op;
  1135. -static void omap4_l2x0_set_debug(unsigned long val)
  1136. -{
  1137. - /* Program PL310 L2 Cache controller debug register */
  1138. - omap_smc1(0x100, val);
  1139. + switch (reg) {
  1140. + case L2X0_CTRL:
  1141. + smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
  1142. + break;
  1143. +
  1144. + case L2X0_AUX_CTRL:
  1145. + smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
  1146. + break;
  1147. +
  1148. + case L2X0_DEBUG_CTRL:
  1149. + smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
  1150. + break;
  1151. +
  1152. + case L310_PREFETCH_CTRL:
  1153. + smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
  1154. + break;
  1155. +
  1156. + default:
  1157. + WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
  1158. + return;
  1159. + }
  1160. +
  1161. + omap_smc1(smc_op, val);
  1162. }
  1163. -static int __init omap_l2_cache_init(void)
  1164. +int __init omap_l2_cache_init(void)
  1165. {
  1166. - u32 aux_ctrl = 0;
  1167. -
  1168. - /*
  1169. - * To avoid code running on other OMAPs in
  1170. - * multi-omap builds
  1171. - */
  1172. - if (!cpu_is_omap44xx())
  1173. - return -ENODEV;
  1174. + u32 aux_ctrl;
  1175. /* Static mapping, never released */
  1176. l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
  1177. if (WARN_ON(!l2cache_base))
  1178. return -ENOMEM;
  1179. - /*
  1180. - * 16-way associativity, parity disabled
  1181. - * Way size - 32KB (es1.0)
  1182. - * Way size - 64KB (es2.0 +)
  1183. - */
  1184. - aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
  1185. - (0x1 << 25) |
  1186. - (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
  1187. - (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
  1188. -
  1189. - if (omap_rev() == OMAP4430_REV_ES1_0) {
  1190. - aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
  1191. - } else {
  1192. - aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
  1193. - (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
  1194. - (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
  1195. - (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
  1196. - (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
  1197. - }
  1198. - if (omap_rev() != OMAP4430_REV_ES1_0)
  1199. - omap_smc1(0x109, aux_ctrl);
  1200. -
  1201. - /* Enable PL310 L2 Cache controller */
  1202. - omap_smc1(0x102, 0x1);
  1203. + /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
  1204. + aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
  1205. + L310_AUX_CTRL_DATA_PREFETCH |
  1206. + L310_AUX_CTRL_INSTR_PREFETCH;
  1207. + outer_cache.write_sec = omap4_l2c310_write_sec;
  1208. if (of_have_populated_dt())
  1209. - l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
  1210. + l2x0_of_init(aux_ctrl, 0xcf9fffff);
  1211. else
  1212. - l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
  1213. -
  1214. - /*
  1215. - * Override default outer_cache.disable with a OMAP4
  1216. - * specific one
  1217. - */
  1218. - outer_cache.disable = omap4_l2x0_disable;
  1219. - outer_cache.set_debug = omap4_l2x0_set_debug;
  1220. + l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
  1221. return 0;
  1222. }
  1223. -omap_early_initcall(omap_l2_cache_init);
  1224. #endif
  1225. void __iomem *omap4_get_sar_ram_base(void)
  1226. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c linux-3.15-rc1/arch/arm/mach-omap2/omap-mpuss-lowpower.c
  1227. --- linux-3.15-rc1.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2014-04-13 23:18:35.000000000 +0200
  1228. +++ linux-3.15-rc1/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2014-04-25 14:11:13.523375094 +0200
  1229. @@ -187,19 +187,15 @@
  1230. * in every restore MPUSS OFF path.
  1231. */
  1232. #ifdef CONFIG_CACHE_L2X0
  1233. -static void save_l2x0_context(void)
  1234. +static void __init save_l2x0_context(void)
  1235. {
  1236. - u32 val;
  1237. - void __iomem *l2x0_base = omap4_get_l2cache_base();
  1238. - if (l2x0_base) {
  1239. - val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
  1240. - __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
  1241. - val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
  1242. - __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
  1243. - }
  1244. + __raw_writel(l2x0_saved_regs.aux_ctrl,
  1245. + sar_base + L2X0_AUXCTRL_OFFSET);
  1246. + __raw_writel(l2x0_saved_regs.prefetch_ctrl,
  1247. + sar_base + L2X0_PREFETCH_CTRL_OFFSET);
  1248. }
  1249. #else
  1250. -static void save_l2x0_context(void)
  1251. +static void __init save_l2x0_context(void)
  1252. {}
  1253. #endif
  1254. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-prima2/l2x0.c linux-3.15-rc1/arch/arm/mach-prima2/l2x0.c
  1255. --- linux-3.15-rc1.orig/arch/arm/mach-prima2/l2x0.c 2014-04-13 23:18:35.000000000 +0200
  1256. +++ linux-3.15-rc1/arch/arm/mach-prima2/l2x0.c 2014-04-25 14:11:13.523375094 +0200
  1257. @@ -8,42 +8,10 @@
  1258. #include <linux/init.h>
  1259. #include <linux/kernel.h>
  1260. -#include <linux/of.h>
  1261. #include <asm/hardware/cache-l2x0.h>
  1262. -struct l2x0_aux {
  1263. - u32 val;
  1264. - u32 mask;
  1265. -};
  1266. -
  1267. -static const struct l2x0_aux prima2_l2x0_aux __initconst = {
  1268. - .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT,
  1269. - .mask = 0,
  1270. -};
  1271. -
  1272. -static const struct l2x0_aux marco_l2x0_aux __initconst = {
  1273. - .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
  1274. - (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT),
  1275. - .mask = L2X0_AUX_CTRL_MASK,
  1276. -};
  1277. -
  1278. -static const struct of_device_id sirf_l2x0_ids[] __initconst = {
  1279. - { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, },
  1280. - { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, },
  1281. - {},
  1282. -};
  1283. -
  1284. static int __init sirfsoc_l2x0_init(void)
  1285. {
  1286. - struct device_node *np;
  1287. - const struct l2x0_aux *aux;
  1288. -
  1289. - np = of_find_matching_node(NULL, sirf_l2x0_ids);
  1290. - if (np) {
  1291. - aux = of_match_node(sirf_l2x0_ids, np)->data;
  1292. - return l2x0_of_init(aux->val, aux->mask);
  1293. - }
  1294. -
  1295. - return 0;
  1296. + return l2x0_of_init(0, ~0);
  1297. }
  1298. early_initcall(sirfsoc_l2x0_init);
  1299. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-prima2/pm.c linux-3.15-rc1/arch/arm/mach-prima2/pm.c
  1300. --- linux-3.15-rc1.orig/arch/arm/mach-prima2/pm.c 2014-04-13 23:18:35.000000000 +0200
  1301. +++ linux-3.15-rc1/arch/arm/mach-prima2/pm.c 2014-04-25 14:11:13.523375094 +0200
  1302. @@ -71,7 +71,6 @@
  1303. case PM_SUSPEND_MEM:
  1304. sirfsoc_pre_suspend_power_off();
  1305. - outer_flush_all();
  1306. outer_disable();
  1307. /* go zzz */
  1308. cpu_suspend(0, sirfsoc_finish_suspend);
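
The explicit outer_flush_all() before outer_disable() disappears here for the same reason as in the Highbank and OMAP hunks: the new outer_disable() contract is that it cleans, invalidates and disables in one go. A suspend path written against that contract looks roughly like this (names hypothetical):

    /* Sketch against the new outer_disable() contract; my_finish_suspend()
     * stands in for the platform's real last-man routine. */
    #include <asm/outercache.h>
    #include <asm/suspend.h>

    static int my_finish_suspend(unsigned long arg)
    {
        /* platform-specific power-down would go here */
        return 0;
    }

    static int my_pm_enter(void)
    {
        outer_disable();    /* cleans, invalidates and disables the L2C */
        return cpu_suspend(0, my_finish_suspend);
    }
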
  1309. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-realview/realview_eb.c linux-3.15-rc1/arch/arm/mach-realview/realview_eb.c
  1310. --- linux-3.15-rc1.orig/arch/arm/mach-realview/realview_eb.c 2014-04-13 23:18:35.000000000 +0200
  1311. +++ linux-3.15-rc1/arch/arm/mach-realview/realview_eb.c 2014-04-25 14:11:13.523375094 +0200
  1312. @@ -442,8 +442,13 @@
  1313. realview_eb11mp_fixup();
  1314. #ifdef CONFIG_CACHE_L2X0
  1315. - /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
  1316. - * Bits: .... ...0 0111 1001 0000 .... .... .... */
  1317. + /*
  1318. + * The PL220 needs to be manually configured as the hardware
  1319. + * doesn't report the correct sizes.
  1320. + * 1MB (128KB/way), 8-way associativity, event monitor and
  1321. + * parity enabled, ignore share bit, no force write allocate
  1322. + * Bits: .... ...0 0111 1001 0000 .... .... ....
  1323. + */
  1324. l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
  1325. #endif
  1326. platform_device_register(&pmu_device);
  1327. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-realview/realview_pb1176.c linux-3.15-rc1/arch/arm/mach-realview/realview_pb1176.c
  1328. --- linux-3.15-rc1.orig/arch/arm/mach-realview/realview_pb1176.c 2014-04-13 23:18:35.000000000 +0200
  1329. +++ linux-3.15-rc1/arch/arm/mach-realview/realview_pb1176.c 2014-04-25 14:11:13.527375113 +0200
  1330. @@ -355,7 +355,13 @@
  1331. int i;
  1332. #ifdef CONFIG_CACHE_L2X0
  1333. - /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */
  1334. + /*
  1335. + * The PL220 needs to be manually configured as the hardware
  1336. + * doesn't report the correct sizes.
  1337. + * 128kB (16kB/way), 8-way associativity, event monitor and
  1338. + * parity enabled, ignore share bit, no force write allocate
  1339. + * Bits: .... ...0 0111 0011 0000 .... .... ....
  1340. + */
  1341. l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
  1342. #endif
  1343. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-realview/realview_pb11mp.c linux-3.15-rc1/arch/arm/mach-realview/realview_pb11mp.c
  1344. --- linux-3.15-rc1.orig/arch/arm/mach-realview/realview_pb11mp.c 2014-04-13 23:18:35.000000000 +0200
  1345. +++ linux-3.15-rc1/arch/arm/mach-realview/realview_pb11mp.c 2014-04-25 14:11:13.527375113 +0200
  1346. @@ -337,8 +337,13 @@
  1347. int i;
  1348. #ifdef CONFIG_CACHE_L2X0
  1349. - /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
  1350. - * Bits: .... ...0 0111 1001 0000 .... .... .... */
  1351. + /*
  1352. + * The PL220 needs to be manually configured as the hardware
  1353. + * doesn't report the correct sizes.
  1354. + * 1MB (128KB/way), 8-way associativity, event monitor and
  1355. + * parity enabled, ignore share bit, no force write allocate
  1356. + * Bits: .... ...0 0111 1001 0000 .... .... ....
  1357. + */
  1358. l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
  1359. #endif
  1360. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-realview/realview_pbx.c linux-3.15-rc1/arch/arm/mach-realview/realview_pbx.c
  1361. --- linux-3.15-rc1.orig/arch/arm/mach-realview/realview_pbx.c 2014-04-13 23:18:35.000000000 +0200
  1362. +++ linux-3.15-rc1/arch/arm/mach-realview/realview_pbx.c 2014-04-25 14:11:13.527375113 +0200
  1363. @@ -370,8 +370,8 @@
  1364. __io_address(REALVIEW_PBX_TILE_L220_BASE);
  1365. /* set RAM latencies to 1 cycle for eASIC */
  1366. - writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
  1367. - writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
  1368. + writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
  1369. + writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
  1370. /* 16KB way size, 8-way associativity, parity disabled
  1371. * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
  1372. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-rockchip/rockchip.c linux-3.15-rc1/arch/arm/mach-rockchip/rockchip.c
  1373. --- linux-3.15-rc1.orig/arch/arm/mach-rockchip/rockchip.c 2014-04-13 23:18:35.000000000 +0200
  1374. +++ linux-3.15-rc1/arch/arm/mach-rockchip/rockchip.c 2014-04-25 14:11:13.527375113 +0200
  1375. @@ -26,7 +26,7 @@
  1376. static void __init rockchip_dt_init(void)
  1377. {
  1378. - l2x0_of_init(0, ~0UL);
  1379. + l2x0_of_init(0, ~0);
  1380. of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  1381. }
  1382. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-armadillo800eva.c linux-3.15-rc1/arch/arm/mach-shmobile/board-armadillo800eva.c
  1383. --- linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-armadillo800eva.c 2014-04-13 23:18:35.000000000 +0200
  1384. +++ linux-3.15-rc1/arch/arm/mach-shmobile/board-armadillo800eva.c 2014-04-25 14:11:13.527375113 +0200
  1385. @@ -1270,8 +1270,8 @@
  1386. #ifdef CONFIG_CACHE_L2X0
  1387. - /* Early BRESP enable, Shared attribute override enable, 32K*8way */
  1388. - l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
  1389. + /* Shared attribute override enable, 32K*8way */
  1390. + l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
  1391. #endif
  1392. i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
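
All of the shmobile boards switch from forcing Early BRESP plus the cache geometry in the aux value to passing only the shared-attribute-override bit and widening the mask, so that way size, associativity and Early BRESP come from the hardware. The old and new l2x0_init() arguments can be decoded with a few lines of standalone C; the way-size formula (1 << (field + 3) KB) is assumed from the cache-l2x0 driver:

    /* Standalone decode of the old/new l2x0_init() aux values in the
     * shmobile hunks.  Build: cc -o auxdec auxdec.c && ./auxdec */
    #include <stdio.h>

    static void decode(unsigned aux)
    {
        unsigned way_field = (aux >> 17) & 7;

        printf("0x%08x: early_bresp=%u shared_override=%u assoc16=%u "
               "way_size=%uKB\n", aux,
               (aux >> 30) & 1, (aux >> 22) & 1, (aux >> 16) & 1,
               way_field ? 1u << (way_field + 3) : 0);
    }

    int main(void)
    {
        decode(0x40440000);     /* old armadillo800eva value: BRESP + 32KB ways */
        decode(0x40460000);     /* old kzm9g value: BRESP + 64KB ways */
        decode(0x40470000);     /* old r8a7778/9 value: BRESP + 64KB, 16-way */
        decode(0x00400000);     /* new common value: shared override only */
        return 0;
    }
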
  1393. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c linux-3.15-rc1/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
  1394. --- linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2014-04-13 23:18:35.000000000 +0200
  1395. +++ linux-3.15-rc1/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2014-04-25 14:11:13.527375113 +0200
  1396. @@ -164,8 +164,8 @@
  1397. r8a7740_meram_workaround();
  1398. #ifdef CONFIG_CACHE_L2X0
  1399. - /* Early BRESP enable, Shared attribute override enable, 32K*8way */
  1400. - l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
  1401. + /* Shared attribute override enable, 32K*8way */
  1402. + l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
  1403. #endif
  1404. r8a7740_add_standard_devices_dt();
  1405. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-kzm9g.c linux-3.15-rc1/arch/arm/mach-shmobile/board-kzm9g.c
  1406. --- linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-kzm9g.c 2014-04-13 23:18:35.000000000 +0200
  1407. +++ linux-3.15-rc1/arch/arm/mach-shmobile/board-kzm9g.c 2014-04-25 14:11:13.527375113 +0200
  1408. @@ -876,8 +876,8 @@
  1409. gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
  1410. #ifdef CONFIG_CACHE_L2X0
  1411. - /* Early BRESP enable, Shared attribute override enable, 64K*8way */
  1412. - l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
  1413. + /* Shared attribute override enable, 64K*8way */
  1414. + l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
  1415. #endif
  1416. i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
  1417. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c linux-3.15-rc1/arch/arm/mach-shmobile/board-kzm9g-reference.c
  1418. --- linux-3.15-rc1.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c 2014-04-13 23:18:35.000000000 +0200
  1419. +++ linux-3.15-rc1/arch/arm/mach-shmobile/board-kzm9g-reference.c 2014-04-25 14:11:13.527375113 +0200
  1420. @@ -36,8 +36,8 @@
  1421. sh73a0_add_standard_devices_dt();
  1422. #ifdef CONFIG_CACHE_L2X0
  1423. - /* Early BRESP enable, Shared attribute override enable, 64K*8way */
  1424. - l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
  1425. + /* Shared attribute override enable, 64K*8way */
  1426. + l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
  1427. #endif
  1428. }
  1429. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-shmobile/setup-r8a7778.c linux-3.15-rc1/arch/arm/mach-shmobile/setup-r8a7778.c
  1430. --- linux-3.15-rc1.orig/arch/arm/mach-shmobile/setup-r8a7778.c 2014-04-13 23:18:35.000000000 +0200
  1431. +++ linux-3.15-rc1/arch/arm/mach-shmobile/setup-r8a7778.c 2014-04-25 14:11:13.527375113 +0200
  1432. @@ -298,10 +298,10 @@
  1433. void __iomem *base = ioremap_nocache(0xf0100000, 0x1000);
  1434. if (base) {
  1435. /*
  1436. - * Early BRESP enable, Shared attribute override enable, 64K*16way
  1437. + * Shared attribute override enable, 64K*16way
  1438. * don't call iounmap(base)
  1439. */
  1440. - l2x0_init(base, 0x40470000, 0x82000fff);
  1441. + l2x0_init(base, 0x00400000, 0xc20f0fff);
  1442. }
  1443. #endif
  1444. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-shmobile/setup-r8a7779.c linux-3.15-rc1/arch/arm/mach-shmobile/setup-r8a7779.c
  1445. --- linux-3.15-rc1.orig/arch/arm/mach-shmobile/setup-r8a7779.c 2014-04-13 23:18:35.000000000 +0200
  1446. +++ linux-3.15-rc1/arch/arm/mach-shmobile/setup-r8a7779.c 2014-04-25 14:11:13.527375113 +0200
  1447. @@ -700,8 +700,8 @@
  1448. void __init r8a7779_add_standard_devices(void)
  1449. {
  1450. #ifdef CONFIG_CACHE_L2X0
  1451. - /* Early BRESP enable, Shared attribute override enable, 64K*16way */
  1452. - l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
  1453. + /* Shared attribute override enable, 64K*16way */
  1454. + l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
  1455. #endif
  1456. r8a7779_pm_init();
  1457. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-socfpga/socfpga.c linux-3.15-rc1/arch/arm/mach-socfpga/socfpga.c
  1458. --- linux-3.15-rc1.orig/arch/arm/mach-socfpga/socfpga.c 2014-04-13 23:18:35.000000000 +0200
  1459. +++ linux-3.15-rc1/arch/arm/mach-socfpga/socfpga.c 2014-04-25 14:11:13.527375113 +0200
  1460. @@ -100,7 +100,7 @@
  1461. static void __init socfpga_cyclone5_init(void)
  1462. {
  1463. - l2x0_of_init(0, ~0UL);
  1464. + l2x0_of_init(0, ~0);
  1465. of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  1466. }
  1467. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-spear/platsmp.c linux-3.15-rc1/arch/arm/mach-spear/platsmp.c
  1468. --- linux-3.15-rc1.orig/arch/arm/mach-spear/platsmp.c 2014-04-13 23:18:35.000000000 +0200
  1469. +++ linux-3.15-rc1/arch/arm/mach-spear/platsmp.c 2014-04-25 14:11:13.527375113 +0200
  1470. @@ -20,6 +20,18 @@
  1471. #include <mach/spear.h>
  1472. #include "generic.h"
  1473. +/*
  1474. + * Write pen_release in a way that is guaranteed to be visible to all
  1475. + * observers, irrespective of whether they're taking part in coherency
  1476. + * or not. This is necessary for the hotplug code to work reliably.
  1477. + */
  1478. +static void write_pen_release(int val)
  1479. +{
  1480. + pen_release = val;
  1481. + smp_wmb();
  1482. + sync_cache_w(&pen_release);
  1483. +}
  1484. +
  1485. static DEFINE_SPINLOCK(boot_lock);
  1486. static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
  1487. @@ -30,8 +42,7 @@
  1488. * let the primary processor know we're out of the
  1489. * pen, then head off into the C entry point
  1490. */
  1491. - pen_release = -1;
  1492. - smp_wmb();
  1493. + write_pen_release(-1);
  1494. /*
  1495. * Synchronise with the boot thread.
  1496. @@ -58,9 +69,7 @@
  1497. * Note that "pen_release" is the hardware CPU ID, whereas
  1498. * "cpu" is Linux's internal ID.
  1499. */
  1500. - pen_release = cpu;
  1501. - flush_cache_all();
  1502. - outer_flush_all();
  1503. + write_pen_release(cpu);
  1504. timeout = jiffies + (1 * HZ);
  1505. while (time_before(jiffies, timeout)) {
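The write_pen_release() helper added above replaces the heavyweight flush_cache_all()/outer_flush_all() pair with smp_wmb() plus sync_cache_w(), which only needs to push the line holding pen_release out to the point of coherency. Below is a hedged, host-compilable sketch of the handshake the helper serves (the boot CPU publishes the hardware CPU id, the secondary writes -1 back once it has left the pen); the cache maintenance is represented only by a comment and the names are illustrative.

    #include <stdio.h>

    /* Sketch of the pen_release handshake.  The published value must
     * reach the point of coherency because the waiting CPU may not yet
     * take part in coherency; in the kernel that is smp_wmb() plus
     * sync_cache_w(&pen_release). */
    static volatile int pen = -1;

    static void write_pen(int val)
    {
        pen = val;
        /* barrier + clean the line holding 'pen' to PoC goes here */
    }

    int main(void)
    {
        write_pen(1);            /* boot CPU releases hardware CPU 1 */
        if (pen == 1)            /* secondary sees its id ...        */
            write_pen(-1);       /* ... and signals it has left      */
        printf("pen = %d\n", pen);
        return 0;
    }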
  1506. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-spear/spear13xx.c linux-3.15-rc1/arch/arm/mach-spear/spear13xx.c
  1507. --- linux-3.15-rc1.orig/arch/arm/mach-spear/spear13xx.c 2014-04-13 23:18:35.000000000 +0200
  1508. +++ linux-3.15-rc1/arch/arm/mach-spear/spear13xx.c 2014-04-25 14:11:13.527375113 +0200
  1509. @@ -38,15 +38,15 @@
  1510. if (!IS_ENABLED(CONFIG_CACHE_L2X0))
  1511. return;
  1512. - writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
  1513. + writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL);
  1514. /*
  1515. * Program following latencies in order to make
  1516. * SPEAr1340 work at 600 MHz
  1517. */
  1518. - writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
  1519. - writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
  1520. - l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
  1521. + writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL);
  1522. + writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL);
  1523. + l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff);
  1524. }
  1525. /*
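Besides switching to the L310_-prefixed register names, the SPEAr13xx hunk keeps the magic latency value 0x221. As a readability aid, the sketch below rebuilds it from per-field helpers; the field layout (setup in bits [2:0], read in [6:4], write in [10:8], each stored as cycles minus one) is assumed from the L2C-310 latency register format rather than taken from this hunk, so treat the macro names as illustrative.

    #include <stdio.h>

    /* Illustrative only: rebuild the 0x221 tag-latency value from
     * per-field helpers (setup [2:0], read [6:4], write [10:8]). */
    #define LAT_SETUP(n)  ((n) << 0)
    #define LAT_RD(n)     ((n) << 4)
    #define LAT_WR(n)     ((n) << 8)

    int main(void)
    {
        unsigned int tag = LAT_WR(2) | LAT_RD(2) | LAT_SETUP(1);

        printf("tag latency = 0x%03x (hunk above writes 0x221)\n", tag);
        return 0;
    }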
  1526. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-sti/board-dt.c linux-3.15-rc1/arch/arm/mach-sti/board-dt.c
  1527. --- linux-3.15-rc1.orig/arch/arm/mach-sti/board-dt.c 2014-04-13 23:18:35.000000000 +0200
  1528. +++ linux-3.15-rc1/arch/arm/mach-sti/board-dt.c 2014-04-25 14:11:13.527375113 +0200
  1529. @@ -16,15 +16,9 @@
  1530. void __init stih41x_l2x0_init(void)
  1531. {
  1532. - u32 way_size = 0x4;
  1533. - u32 aux_ctrl;
  1534. - /* may be this can be encoded in macros like BIT*() */
  1535. - aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
  1536. - (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
  1537. - (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
  1538. - (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
  1539. -
  1540. - l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
  1541. + l2x0_of_init(L2C_AUX_CTRL_SHARED_OVERRIDE |
  1542. + L310_AUX_CTRL_DATA_PREFETCH |
  1543. + L310_AUX_CTRL_INSTR_PREFETCH, 0xc00f0fff);
  1544. }
  1545. static void __init stih41x_machine_init(void)
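The STi hunk replaces hand-rolled shifts with the named L2C/L310 aux bits and drops the explicit way-size field. A minimal sketch comparing the two compositions; the shift values (shared override 22, data prefetch 28, instruction prefetch 29, way size 17) are assumptions taken from the generic cache-l2x0 header, not from this hunk. The only difference is the way-size field, which the rework now reads from hardware.

    #include <stdio.h>

    /* Old vs. new STi aux composition, using assumed shift values. */
    int main(void)
    {
        unsigned int old_aux = (1u << 22) | (1u << 28) | (1u << 29) |
                               (4u << 17);          /* way_size = 0x4 */
        unsigned int new_aux = (1u << 22) | (1u << 28) | (1u << 29);

        printf("old 0x%08x, new 0x%08x, difference 0x%08x\n",
               old_aux, new_aux, old_aux ^ new_aux);
        return 0;
    }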
  1546. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-tegra/pm.h linux-3.15-rc1/arch/arm/mach-tegra/pm.h
  1547. --- linux-3.15-rc1.orig/arch/arm/mach-tegra/pm.h 2014-04-13 23:18:35.000000000 +0200
  1548. +++ linux-3.15-rc1/arch/arm/mach-tegra/pm.h 2014-04-25 14:11:13.527375113 +0200
  1549. @@ -35,8 +35,6 @@
  1550. void tegra30_lp1_iram_hook(void);
  1551. void tegra30_sleep_core_init(void);
  1552. -extern unsigned long l2x0_saved_regs_addr;
  1553. -
  1554. void tegra_clear_cpu_in_lp2(void);
  1555. bool tegra_set_cpu_in_lp2(void);
  1556. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-tegra/reset-handler.S linux-3.15-rc1/arch/arm/mach-tegra/reset-handler.S
  1557. --- linux-3.15-rc1.orig/arch/arm/mach-tegra/reset-handler.S 2014-04-13 23:18:35.000000000 +0200
  1558. +++ linux-3.15-rc1/arch/arm/mach-tegra/reset-handler.S 2014-04-25 14:11:13.527375113 +0200
  1559. @@ -19,7 +19,6 @@
  1560. #include <asm/cache.h>
  1561. #include <asm/asm-offsets.h>
  1562. -#include <asm/hardware/cache-l2x0.h>
  1563. #include "flowctrl.h"
  1564. #include "fuse.h"
  1565. @@ -78,8 +77,10 @@
  1566. str r1, [r0]
  1567. #endif
  1568. +#ifdef CONFIG_CACHE_L2X0
  1569. /* L2 cache resume & re-enable */
  1570. - l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr
  1571. + bl l2c310_early_resume
  1572. +#endif
  1573. end_ca9_scu_l2_resume:
  1574. mov32 r9, 0xc0f
  1575. cmp r8, r9
  1576. @@ -89,12 +90,6 @@
  1577. ENDPROC(tegra_resume)
  1578. #endif
  1579. -#ifdef CONFIG_CACHE_L2X0
  1580. - .globl l2x0_saved_regs_addr
  1581. -l2x0_saved_regs_addr:
  1582. - .long 0
  1583. -#endif
  1584. -
  1585. .align L1_CACHE_SHIFT
  1586. ENTRY(__tegra_cpu_reset_handler_start)
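With the assembly macro gone (see the sleep.h hunk below), the reset handler simply branches to l2c310_early_resume and the restore logic lives in C inside cache-l2x0.c (l2c310_resume, further down in this patch). The essential property is that the resume path is a no-op when firmware has already enabled the L2C. A tiny host-side sketch of that guard; the register is a plain variable and the restore steps are condensed into a comment.

    #include <stdio.h>

    /* Resume guard as used by l2c310_resume(): restore and enable the
     * controller only if it is not already enabled.  'ctrl' stands in
     * for the L2X0_CTRL register. */
    static unsigned int ctrl;                  /* 0 = disabled */

    static void resume_sketch(void)
    {
        if (!(ctrl & 1)) {                     /* L2X0_CTRL_EN */
            /* restore latencies, filters, prefetch/power, aux ... */
            ctrl |= 1;
        }
    }

    int main(void)
    {
        resume_sketch();
        resume_sketch();                       /* second call: no-op */
        printf("L2C enabled: %u\n", ctrl & 1);
        return 0;
    }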
  1587. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-tegra/sleep.h linux-3.15-rc1/arch/arm/mach-tegra/sleep.h
  1588. --- linux-3.15-rc1.orig/arch/arm/mach-tegra/sleep.h 2014-04-13 23:18:35.000000000 +0200
  1589. +++ linux-3.15-rc1/arch/arm/mach-tegra/sleep.h 2014-04-25 14:11:13.527375113 +0200
  1590. @@ -120,37 +120,6 @@
  1591. mov \tmp1, \tmp1, lsr #8
  1592. .endm
  1593. -/* Macro to resume & re-enable L2 cache */
  1594. -#ifndef L2X0_CTRL_EN
  1595. -#define L2X0_CTRL_EN 1
  1596. -#endif
  1597. -
  1598. -#ifdef CONFIG_CACHE_L2X0
  1599. -.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
  1600. - W(adr) \tmp1, \phys_l2x0_saved_regs
  1601. - ldr \tmp1, [\tmp1]
  1602. - ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE]
  1603. - ldr \tmp3, [\tmp2, #L2X0_CTRL]
  1604. - tst \tmp3, #L2X0_CTRL_EN
  1605. - bne exit_l2_resume
  1606. - ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY]
  1607. - str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL]
  1608. - ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY]
  1609. - str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL]
  1610. - ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL]
  1611. - str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL]
  1612. - ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL]
  1613. - str \tmp3, [\tmp2, #L2X0_POWER_CTRL]
  1614. - ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL]
  1615. - str \tmp3, [\tmp2, #L2X0_AUX_CTRL]
  1616. - mov \tmp3, #L2X0_CTRL_EN
  1617. - str \tmp3, [\tmp2, #L2X0_CTRL]
  1618. -exit_l2_resume:
  1619. -.endm
  1620. -#else /* CONFIG_CACHE_L2X0 */
  1621. -.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
  1622. -.endm
  1623. -#endif /* CONFIG_CACHE_L2X0 */
  1624. #else
  1625. void tegra_pen_lock(void);
  1626. void tegra_pen_unlock(void);
  1627. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-tegra/tegra.c linux-3.15-rc1/arch/arm/mach-tegra/tegra.c
  1628. --- linux-3.15-rc1.orig/arch/arm/mach-tegra/tegra.c 2014-04-13 23:18:35.000000000 +0200
  1629. +++ linux-3.15-rc1/arch/arm/mach-tegra/tegra.c 2014-04-25 14:11:13.527375113 +0200
  1630. @@ -73,27 +73,7 @@
  1631. static void __init tegra_init_cache(void)
  1632. {
  1633. #ifdef CONFIG_CACHE_L2X0
  1634. - static const struct of_device_id pl310_ids[] __initconst = {
  1635. - { .compatible = "arm,pl310-cache", },
  1636. - {}
  1637. - };
  1638. -
  1639. - struct device_node *np;
  1640. - int ret;
  1641. - void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
  1642. - u32 aux_ctrl, cache_type;
  1643. -
  1644. - np = of_find_matching_node(NULL, pl310_ids);
  1645. - if (!np)
  1646. - return;
  1647. -
  1648. - cache_type = readl(p + L2X0_CACHE_TYPE);
  1649. - aux_ctrl = (cache_type & 0x700) << (17-8);
  1650. - aux_ctrl |= 0x7C400001;
  1651. -
  1652. - ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
  1653. - if (!ret)
  1654. - l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
  1655. + l2x0_of_init(0x3c400001, 0xc20fc3fe);
  1656. #endif
  1657. }
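The removed Tegra code derived the aux way-size field from the CACHE_TYPE register: (cache_type & 0x700) << (17 - 8) copies bits [10:8] into aux bits [19:17]. The rework drops this because __l2c_init() (later in this patch) warns when a platform tries to specify the cache geometry and takes it from AUX_CTRL instead. A small sketch of the removed arithmetic, with a made-up CACHE_TYPE value purely for illustration.

    #include <stdio.h>

    /* Move the way-size field from CACHE_TYPE bits [10:8] to AUX_CTRL
     * bits [19:17], as the removed Tegra code did. */
    int main(void)
    {
        unsigned int cache_type = 0x00000300;  /* hypothetical readout */
        unsigned int aux_bits = (cache_type & 0x700) << (17 - 8);

        printf("way-size code %u -> aux bits 0x%08x\n",
               (cache_type >> 8) & 7, aux_bits);
        return 0;
    }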
  1658. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-ux500/cache-l2x0.c linux-3.15-rc1/arch/arm/mach-ux500/cache-l2x0.c
  1659. --- linux-3.15-rc1.orig/arch/arm/mach-ux500/cache-l2x0.c 2014-04-13 23:18:35.000000000 +0200
  1660. +++ linux-3.15-rc1/arch/arm/mach-ux500/cache-l2x0.c 2014-04-25 14:11:13.527375113 +0200
  1661. @@ -35,10 +35,16 @@
  1662. return 0;
  1663. }
  1664. -static int __init ux500_l2x0_init(void)
  1665. +static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
  1666. {
  1667. - u32 aux_val = 0x3e000000;
  1668. + /*
  1669. + * We can't write to secure registers as we are in non-secure
  1670. + * mode, until we have some SMI service available.
  1671. + */
  1672. +}
  1673. +static int __init ux500_l2x0_init(void)
  1674. +{
  1675. if (cpu_is_u8500_family() || cpu_is_ux540_family())
  1676. l2x0_base = __io_address(U8500_L2CC_BASE);
  1677. else
  1678. @@ -48,28 +54,12 @@
  1679. /* Unlock before init */
  1680. ux500_l2x0_unlock();
  1681. - /* DBx540's L2 has 128KB way size */
  1682. - if (cpu_is_ux540_family())
  1683. - /* 128KB way size */
  1684. - aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
  1685. - else
  1686. - /* 64KB way size */
  1687. - aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
  1688. + outer_cache.write_sec = ux500_l2c310_write_sec;
  1689. - /* 64KB way size, 8 way associativity, force WA */
  1690. if (of_have_populated_dt())
  1691. - l2x0_of_init(aux_val, 0xc0000fff);
  1692. + l2x0_of_init(0, ~0);
  1693. else
  1694. - l2x0_init(l2x0_base, aux_val, 0xc0000fff);
  1695. -
  1696. - /*
  1697. - * We can't disable l2 as we are in non secure mode, currently
  1698. - * this seems be called only during kexec path. So let's
  1699. - * override outer.disable with nasty assignment until we have
  1700. - * some SMI service available.
  1701. - */
  1702. - outer_cache.disable = NULL;
  1703. - outer_cache.set_debug = NULL;
  1704. + l2x0_init(l2x0_base, 0, ~0);
  1705. return 0;
  1706. }
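ux500 runs non-secure, so instead of the old trick of NULLing outer_cache.disable and set_debug it now installs a write_sec hook that silently drops secure-only writes until an SMI service exists. For a platform that does have such a service, a hook might look like the hedged sketch below; firmware_l2x0_write() is a stand-in stub, not an API from this patch or mainline.

    #include <stdio.h>

    /* Hedged sketch of a platform write_sec hook.  The core calls
     * outer_cache.write_sec(val, reg) for secure-only L2C registers;
     * a non-secure platform would forward the write to its secure
     * firmware. */
    static void firmware_l2x0_write(unsigned reg, unsigned long val)
    {
        printf("firmware: write 0x%08lx to L2C register 0x%03x\n",
               val, reg);
    }

    static void example_l2c310_write_sec(unsigned long val, unsigned reg)
    {
        firmware_l2x0_write(reg, val);
    }

    int main(void)
    {
        example_l2c310_write_sec(0x1, 0x100);  /* e.g. L2X0_CTRL */
        return 0;
    }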
  1707. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-vexpress/ct-ca9x4.c linux-3.15-rc1/arch/arm/mach-vexpress/ct-ca9x4.c
  1708. --- linux-3.15-rc1.orig/arch/arm/mach-vexpress/ct-ca9x4.c 2014-04-13 23:18:35.000000000 +0200
  1709. +++ linux-3.15-rc1/arch/arm/mach-vexpress/ct-ca9x4.c 2014-04-25 14:11:13.527375113 +0200
  1710. @@ -45,6 +45,23 @@
  1711. iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
  1712. }
  1713. +static void __init ca9x4_l2_init(void)
  1714. +{
  1715. +#ifdef CONFIG_CACHE_L2X0
  1716. + void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
  1717. +
  1718. + if (l2x0_base) {
  1719. + /* set RAM latencies to 1 cycle for this core tile. */
  1720. + writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
  1721. + writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
  1722. +
  1723. + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
  1724. + } else {
  1725. + pr_err("L2C: unable to map L2 cache controller\n");
  1726. + }
  1727. +#endif
  1728. +}
  1729. +
  1730. #ifdef CONFIG_HAVE_ARM_TWD
  1731. static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);
  1732. @@ -63,6 +80,7 @@
  1733. gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
  1734. ioremap(A9_MPCORE_GIC_CPU, SZ_256));
  1735. ca9x4_twd_init();
  1736. + ca9x4_l2_init();
  1737. }
  1738. static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
  1739. @@ -141,16 +159,6 @@
  1740. {
  1741. int i;
  1742. -#ifdef CONFIG_CACHE_L2X0
  1743. - void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
  1744. -
  1745. - /* set RAM latencies to 1 cycle for this core tile. */
  1746. - writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
  1747. - writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
  1748. -
  1749. - l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
  1750. -#endif
  1751. -
  1752. for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
  1753. amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
  1754. diff -Nur linux-3.15-rc1.orig/arch/arm/mach-zynq/common.c linux-3.15-rc1/arch/arm/mach-zynq/common.c
  1755. --- linux-3.15-rc1.orig/arch/arm/mach-zynq/common.c 2014-04-13 23:18:35.000000000 +0200
  1756. +++ linux-3.15-rc1/arch/arm/mach-zynq/common.c 2014-04-25 14:11:13.531375131 +0200
  1757. @@ -70,7 +70,7 @@
  1758. /*
  1759. * 64KB way size, 8-way associativity, parity disabled
  1760. */
  1761. - l2x0_of_init(0x02060000, 0xF0F0FFFF);
  1762. + l2x0_of_init(0x02000000, 0xf0ffffff);
  1763. of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  1764. diff -Nur linux-3.15-rc1.orig/arch/arm/mm/cache-feroceon-l2.c linux-3.15-rc1/arch/arm/mm/cache-feroceon-l2.c
  1765. --- linux-3.15-rc1.orig/arch/arm/mm/cache-feroceon-l2.c 2014-04-13 23:18:35.000000000 +0200
  1766. +++ linux-3.15-rc1/arch/arm/mm/cache-feroceon-l2.c 2014-04-25 14:11:13.531375131 +0200
  1767. @@ -350,7 +350,6 @@
  1768. outer_cache.inv_range = feroceon_l2_inv_range;
  1769. outer_cache.clean_range = feroceon_l2_clean_range;
  1770. outer_cache.flush_range = feroceon_l2_flush_range;
  1771. - outer_cache.inv_all = l2_inv_all;
  1772. enable_l2();
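The remainder of the patch rewrites cache-l2x0.c around a per-variant l2c_init_data descriptor: l2x0_init() reads the CACHE_ID part number and hands the matching descriptor to __l2c_init(). A condensed, host-compilable sketch of that dispatch; the constants are assumed to match the L2X0_CACHE_ID_PART_* values (part field in bits [9:6], L310 = 3, L220 = 2, L210 = 1) and the cache ID in main() is invented.

    #include <stdio.h>

    /* One descriptor per controller variant, selected by the CACHE_ID
     * part number. */
    struct l2c_init_data_sketch {
        const char *type;
        unsigned way_size_0;
        unsigned num_lock;
    };

    static const struct l2c_init_data_sketch l2c210 = { "L2C-210", 8192, 1 };
    static const struct l2c_init_data_sketch l2c220 = { "L2C-220", 8192, 1 };
    static const struct l2c_init_data_sketch l2c310 = { "L2C-310", 8192, 8 };

    static const struct l2c_init_data_sketch *pick(unsigned cache_id)
    {
        switch (cache_id & (0xf << 6)) {       /* part number mask */
        case 3 << 6:  return &l2c310;
        case 2 << 6:  return &l2c220;
        default:      return &l2c210;          /* L210 and unknown */
        }
    }

    int main(void)
    {
        printf("%s\n", pick(0x410000c8)->type); /* invented PL310 ID */
        return 0;
    }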
  1773. diff -Nur linux-3.15-rc1.orig/arch/arm/mm/cache-l2x0.c linux-3.15-rc1/arch/arm/mm/cache-l2x0.c
  1774. --- linux-3.15-rc1.orig/arch/arm/mm/cache-l2x0.c 2014-04-13 23:18:35.000000000 +0200
  1775. +++ linux-3.15-rc1/arch/arm/mm/cache-l2x0.c 2014-04-25 14:11:13.531375131 +0200
  1776. @@ -16,18 +16,33 @@
  1777. * along with this program; if not, write to the Free Software
  1778. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  1779. */
  1780. +#include <linux/cpu.h>
  1781. #include <linux/err.h>
  1782. #include <linux/init.h>
  1783. +#include <linux/smp.h>
  1784. #include <linux/spinlock.h>
  1785. #include <linux/io.h>
  1786. #include <linux/of.h>
  1787. #include <linux/of_address.h>
  1788. #include <asm/cacheflush.h>
  1789. +#include <asm/cp15.h>
  1790. +#include <asm/cputype.h>
  1791. #include <asm/hardware/cache-l2x0.h>
  1792. #include "cache-tauros3.h"
  1793. #include "cache-aurora-l2.h"
  1794. +struct l2c_init_data {
  1795. + const char *type;
  1796. + unsigned way_size_0;
  1797. + unsigned num_lock;
  1798. + void (*of_parse)(const struct device_node *, u32 *, u32 *);
  1799. + void (*enable)(void __iomem *, u32, unsigned);
  1800. + void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
  1801. + void (*save)(void __iomem *);
  1802. + struct outer_cache_fns outer_cache;
  1803. +};
  1804. +
  1805. #define CACHE_LINE_SIZE 32
  1806. static void __iomem *l2x0_base;
  1807. @@ -36,96 +51,116 @@
  1808. static u32 l2x0_size;
  1809. static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
  1810. -/* Aurora don't have the cache ID register available, so we have to
  1811. - * pass it though the device tree */
  1812. -static u32 cache_id_part_number_from_dt;
  1813. -
  1814. struct l2x0_regs l2x0_saved_regs;
  1815. -struct l2x0_of_data {
  1816. - void (*setup)(const struct device_node *, u32 *, u32 *);
  1817. - void (*save)(void);
  1818. - struct outer_cache_fns outer_cache;
  1819. -};
  1820. -
  1821. -static bool of_init = false;
  1822. -
  1823. -static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
  1824. +/*
  1825. + * Common code for all cache controllers.
  1826. + */
  1827. +static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
  1828. {
  1829. /* wait for cache operation by line or way to complete */
  1830. while (readl_relaxed(reg) & mask)
  1831. cpu_relax();
  1832. }
  1833. -#ifdef CONFIG_CACHE_PL310
  1834. -static inline void cache_wait(void __iomem *reg, unsigned long mask)
  1835. +/*
  1836. + * By default, we write directly to secure registers. Platforms must
  1837. + * override this if they are running non-secure.
  1838. + */
  1839. +static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
  1840. {
  1841. - /* cache operations by line are atomic on PL310 */
  1842. + if (val == readl_relaxed(base + reg))
  1843. + return;
  1844. + if (outer_cache.write_sec)
  1845. + outer_cache.write_sec(val, reg);
  1846. + else
  1847. + writel_relaxed(val, base + reg);
  1848. }
  1849. -#else
  1850. -#define cache_wait cache_wait_way
  1851. -#endif
  1852. -static inline void cache_sync(void)
  1853. +/*
  1854. + * This should only be called when we have a requirement that the
  1855. + * register be written due to a work-around, as platforms running
  1856. + * in non-secure mode may not be able to access this register.
  1857. + */
  1858. +static inline void l2c_set_debug(void __iomem *base, unsigned long val)
  1859. {
  1860. - void __iomem *base = l2x0_base;
  1861. -
  1862. - writel_relaxed(0, base + sync_reg_offset);
  1863. - cache_wait(base + L2X0_CACHE_SYNC, 1);
  1864. + l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
  1865. }
  1866. -static inline void l2x0_clean_line(unsigned long addr)
  1867. +static void __l2c_op_way(void __iomem *reg)
  1868. {
  1869. - void __iomem *base = l2x0_base;
  1870. - cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  1871. - writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
  1872. + writel_relaxed(l2x0_way_mask, reg);
  1873. + l2c_wait_mask(reg, l2x0_way_mask);
  1874. }
  1875. -static inline void l2x0_inv_line(unsigned long addr)
  1876. +static inline void l2c_unlock(void __iomem *base, unsigned num)
  1877. {
  1878. - void __iomem *base = l2x0_base;
  1879. - cache_wait(base + L2X0_INV_LINE_PA, 1);
  1880. - writel_relaxed(addr, base + L2X0_INV_LINE_PA);
  1881. + unsigned i;
  1882. +
  1883. + for (i = 0; i < num; i++) {
  1884. + writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
  1885. + i * L2X0_LOCKDOWN_STRIDE);
  1886. + writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
  1887. + i * L2X0_LOCKDOWN_STRIDE);
  1888. + }
  1889. }
  1890. -#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
  1891. -static inline void debug_writel(unsigned long val)
  1892. +/*
  1893. + * Enable the L2 cache controller. This function must only be
  1894. + * called when the cache controller is known to be disabled.
  1895. + */
  1896. +static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
  1897. {
  1898. - if (outer_cache.set_debug)
  1899. - outer_cache.set_debug(val);
  1900. + unsigned long flags;
  1901. +
  1902. + l2c_write_sec(aux, base, L2X0_AUX_CTRL);
  1903. +
  1904. + l2c_unlock(base, num_lock);
  1905. +
  1906. + local_irq_save(flags);
  1907. + __l2c_op_way(base + L2X0_INV_WAY);
  1908. + writel_relaxed(0, base + sync_reg_offset);
  1909. + l2c_wait_mask(base + sync_reg_offset, 1);
  1910. + local_irq_restore(flags);
  1911. +
  1912. + l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
  1913. }
  1914. -static void pl310_set_debug(unsigned long val)
  1915. +static void l2c_disable(void)
  1916. {
  1917. - writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
  1918. + void __iomem *base = l2x0_base;
  1919. +
  1920. + outer_cache.flush_all();
  1921. + l2c_write_sec(0, base, L2X0_CTRL);
  1922. + dsb(st);
  1923. }
  1924. -#else
  1925. -/* Optimised out for non-errata case */
  1926. -static inline void debug_writel(unsigned long val)
  1927. +
  1928. +#ifdef CONFIG_CACHE_PL310
  1929. +static inline void cache_wait(void __iomem *reg, unsigned long mask)
  1930. {
  1931. + /* cache operations by line are atomic on PL310 */
  1932. }
  1933. -
  1934. -#define pl310_set_debug NULL
  1935. +#else
  1936. +#define cache_wait l2c_wait_mask
  1937. #endif
  1938. -#ifdef CONFIG_PL310_ERRATA_588369
  1939. -static inline void l2x0_flush_line(unsigned long addr)
  1940. +static inline void cache_sync(void)
  1941. {
  1942. void __iomem *base = l2x0_base;
  1943. - /* Clean by PA followed by Invalidate by PA */
  1944. - cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  1945. - writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
  1946. - cache_wait(base + L2X0_INV_LINE_PA, 1);
  1947. - writel_relaxed(addr, base + L2X0_INV_LINE_PA);
  1948. + writel_relaxed(0, base + sync_reg_offset);
  1949. + cache_wait(base + L2X0_CACHE_SYNC, 1);
  1950. }
  1951. -#else
  1952. -static inline void l2x0_flush_line(unsigned long addr)
  1953. +#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
  1954. +static inline void debug_writel(unsigned long val)
  1955. +{
  1956. + l2c_set_debug(l2x0_base, val);
  1957. +}
  1958. +#else
  1959. +/* Optimised out for non-errata case */
  1960. +static inline void debug_writel(unsigned long val)
  1961. {
  1962. - void __iomem *base = l2x0_base;
  1963. - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
  1964. - writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
  1965. }
  1966. #endif
  1967. @@ -141,8 +176,7 @@
  1968. static void __l2x0_flush_all(void)
  1969. {
  1970. debug_writel(0x03);
  1971. - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
  1972. - cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
  1973. + __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
  1974. cache_sync();
  1975. debug_writel(0x00);
  1976. }
  1977. @@ -157,274 +191,882 @@
  1978. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1979. }
  1980. -static void l2x0_clean_all(void)
  1981. +static void l2x0_disable(void)
  1982. {
  1983. unsigned long flags;
  1984. - /* clean all ways */
  1985. raw_spin_lock_irqsave(&l2x0_lock, flags);
  1986. - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
  1987. - cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
  1988. - cache_sync();
  1989. + __l2x0_flush_all();
  1990. + l2c_write_sec(0, l2x0_base, L2X0_CTRL);
  1991. + dsb(st);
  1992. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1993. }
  1994. -static void l2x0_inv_all(void)
  1995. +static void l2c_save(void __iomem *base)
  1996. {
  1997. - unsigned long flags;
  1998. + l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  1999. +}
  2000. - /* invalidate all ways */
  2001. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2002. - /* Invalidating when L2 is enabled is a nono */
  2003. - BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
  2004. - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
  2005. - cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
  2006. - cache_sync();
  2007. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2008. +/*
  2009. + * L2C-210 specific code.
  2010. + *
  2011. + * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
  2012. + * ensure that no background operation is running. The way operations
  2013. + * are all background tasks.
  2014. + *
  2015. + * While a background operation is in progress, any new operation is
  2016. + * ignored (unspecified whether this causes an error.) Thankfully, not
  2017. + * used on SMP.
  2018. + *
2019. + * Never has a sync register other than L2X0_CACHE_SYNC, but
  2020. + * we use sync_reg_offset here so we can share some of this with L2C-310.
  2021. + */
  2022. +static void __l2c210_cache_sync(void __iomem *base)
  2023. +{
  2024. + writel_relaxed(0, base + sync_reg_offset);
  2025. }
  2026. -static void l2x0_inv_range(unsigned long start, unsigned long end)
  2027. +static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
  2028. + unsigned long end)
  2029. +{
  2030. + while (start < end) {
  2031. + writel_relaxed(start, reg);
  2032. + start += CACHE_LINE_SIZE;
  2033. + }
  2034. +}
  2035. +
  2036. +static void l2c210_inv_range(unsigned long start, unsigned long end)
  2037. {
  2038. void __iomem *base = l2x0_base;
  2039. - unsigned long flags;
  2040. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2041. if (start & (CACHE_LINE_SIZE - 1)) {
  2042. start &= ~(CACHE_LINE_SIZE - 1);
  2043. - debug_writel(0x03);
  2044. - l2x0_flush_line(start);
  2045. - debug_writel(0x00);
  2046. + writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
  2047. start += CACHE_LINE_SIZE;
  2048. }
  2049. if (end & (CACHE_LINE_SIZE - 1)) {
  2050. end &= ~(CACHE_LINE_SIZE - 1);
  2051. - debug_writel(0x03);
  2052. - l2x0_flush_line(end);
  2053. - debug_writel(0x00);
  2054. + writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
  2055. }
  2056. + __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
  2057. + __l2c210_cache_sync(base);
  2058. +}
  2059. +
  2060. +static void l2c210_clean_range(unsigned long start, unsigned long end)
  2061. +{
  2062. + void __iomem *base = l2x0_base;
  2063. +
  2064. + start &= ~(CACHE_LINE_SIZE - 1);
  2065. + __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
  2066. + __l2c210_cache_sync(base);
  2067. +}
  2068. +
  2069. +static void l2c210_flush_range(unsigned long start, unsigned long end)
  2070. +{
  2071. + void __iomem *base = l2x0_base;
  2072. +
  2073. + start &= ~(CACHE_LINE_SIZE - 1);
  2074. + __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
  2075. + __l2c210_cache_sync(base);
  2076. +}
  2077. +
  2078. +static void l2c210_flush_all(void)
  2079. +{
  2080. + void __iomem *base = l2x0_base;
  2081. +
  2082. + BUG_ON(!irqs_disabled());
  2083. +
  2084. + __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  2085. + __l2c210_cache_sync(base);
  2086. +}
  2087. +
  2088. +static void l2c210_sync(void)
  2089. +{
  2090. + __l2c210_cache_sync(l2x0_base);
  2091. +}
  2092. +
  2093. +static void l2c210_resume(void)
  2094. +{
  2095. + void __iomem *base = l2x0_base;
  2096. +
  2097. + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
  2098. + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
  2099. +}
  2100. +
  2101. +static const struct l2c_init_data l2c210_data __initconst = {
  2102. + .type = "L2C-210",
  2103. + .way_size_0 = SZ_8K,
  2104. + .num_lock = 1,
  2105. + .enable = l2c_enable,
  2106. + .save = l2c_save,
  2107. + .outer_cache = {
  2108. + .inv_range = l2c210_inv_range,
  2109. + .clean_range = l2c210_clean_range,
  2110. + .flush_range = l2c210_flush_range,
  2111. + .flush_all = l2c210_flush_all,
  2112. + .disable = l2c_disable,
  2113. + .sync = l2c210_sync,
  2114. + .resume = l2c210_resume,
  2115. + },
  2116. +};
  2117. +
  2118. +/*
  2119. + * L2C-220 specific code.
  2120. + *
  2121. + * All operations are background operations: they have to be waited for.
  2122. + * Conflicting requests generate a slave error (which will cause an
  2123. + * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
  2124. + * sync register here.
  2125. + *
  2126. + * However, we can re-use the l2c210_resume call.
  2127. + */
  2128. +static inline void __l2c220_cache_sync(void __iomem *base)
  2129. +{
  2130. + writel_relaxed(0, base + L2X0_CACHE_SYNC);
  2131. + l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
  2132. +}
  2133. +
  2134. +static void l2c220_op_way(void __iomem *base, unsigned reg)
  2135. +{
  2136. + unsigned long flags;
  2137. +
  2138. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2139. + __l2c_op_way(base + reg);
  2140. + __l2c220_cache_sync(base);
  2141. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2142. +}
  2143. +
  2144. +static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
  2145. + unsigned long end, unsigned long flags)
  2146. +{
  2147. + raw_spinlock_t *lock = &l2x0_lock;
  2148. +
  2149. while (start < end) {
  2150. unsigned long blk_end = start + min(end - start, 4096UL);
  2151. while (start < blk_end) {
  2152. - l2x0_inv_line(start);
  2153. + l2c_wait_mask(reg, 1);
  2154. + writel_relaxed(start, reg);
  2155. start += CACHE_LINE_SIZE;
  2156. }
  2157. if (blk_end < end) {
  2158. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2159. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2160. + raw_spin_unlock_irqrestore(lock, flags);
  2161. + raw_spin_lock_irqsave(lock, flags);
  2162. }
  2163. }
  2164. - cache_wait(base + L2X0_INV_LINE_PA, 1);
  2165. - cache_sync();
  2166. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2167. +
  2168. + return flags;
  2169. }
  2170. -static void l2x0_clean_range(unsigned long start, unsigned long end)
  2171. +static void l2c220_inv_range(unsigned long start, unsigned long end)
  2172. {
  2173. void __iomem *base = l2x0_base;
  2174. unsigned long flags;
  2175. - if ((end - start) >= l2x0_size) {
  2176. - l2x0_clean_all();
  2177. - return;
  2178. - }
  2179. -
  2180. raw_spin_lock_irqsave(&l2x0_lock, flags);
  2181. - start &= ~(CACHE_LINE_SIZE - 1);
  2182. - while (start < end) {
  2183. - unsigned long blk_end = start + min(end - start, 4096UL);
  2184. -
  2185. - while (start < blk_end) {
  2186. - l2x0_clean_line(start);
  2187. + if ((start | end) & (CACHE_LINE_SIZE - 1)) {
  2188. + if (start & (CACHE_LINE_SIZE - 1)) {
  2189. + start &= ~(CACHE_LINE_SIZE - 1);
  2190. + writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
  2191. start += CACHE_LINE_SIZE;
  2192. }
  2193. - if (blk_end < end) {
  2194. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2195. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2196. + if (end & (CACHE_LINE_SIZE - 1)) {
  2197. + end &= ~(CACHE_LINE_SIZE - 1);
  2198. + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2199. + writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
  2200. }
  2201. }
  2202. - cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  2203. - cache_sync();
  2204. +
  2205. + flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
  2206. + start, end, flags);
  2207. + l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
  2208. + __l2c220_cache_sync(base);
  2209. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2210. }
  2211. -static void l2x0_flush_range(unsigned long start, unsigned long end)
  2212. +static void l2c220_clean_range(unsigned long start, unsigned long end)
  2213. {
  2214. void __iomem *base = l2x0_base;
  2215. unsigned long flags;
  2216. + start &= ~(CACHE_LINE_SIZE - 1);
  2217. if ((end - start) >= l2x0_size) {
  2218. - l2x0_flush_all();
  2219. + l2c220_op_way(base, L2X0_CLEAN_WAY);
  2220. return;
  2221. }
  2222. raw_spin_lock_irqsave(&l2x0_lock, flags);
  2223. + flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
  2224. + start, end, flags);
  2225. + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2226. + __l2c220_cache_sync(base);
  2227. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2228. +}
  2229. +
  2230. +static void l2c220_flush_range(unsigned long start, unsigned long end)
  2231. +{
  2232. + void __iomem *base = l2x0_base;
  2233. + unsigned long flags;
  2234. +
  2235. start &= ~(CACHE_LINE_SIZE - 1);
  2236. + if ((end - start) >= l2x0_size) {
  2237. + l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
  2238. + return;
  2239. + }
  2240. +
  2241. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2242. + flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
  2243. + start, end, flags);
  2244. + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2245. + __l2c220_cache_sync(base);
  2246. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2247. +}
  2248. +
  2249. +static void l2c220_flush_all(void)
  2250. +{
  2251. + l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
  2252. +}
  2253. +
  2254. +static void l2c220_sync(void)
  2255. +{
  2256. + unsigned long flags;
  2257. +
  2258. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2259. + __l2c220_cache_sync(l2x0_base);
  2260. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2261. +}
  2262. +
  2263. +static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
  2264. +{
  2265. + /*
  2266. + * Always enable non-secure access to the lockdown registers -
  2267. + * we write to them as part of the L2C enable sequence so they
  2268. + * need to be accessible.
  2269. + */
  2270. + aux |= L220_AUX_CTRL_NS_LOCKDOWN;
  2271. +
  2272. + l2c_enable(base, aux, num_lock);
  2273. +}
  2274. +
  2275. +static const struct l2c_init_data l2c220_data = {
  2276. + .type = "L2C-220",
  2277. + .way_size_0 = SZ_8K,
  2278. + .num_lock = 1,
  2279. + .enable = l2c220_enable,
  2280. + .save = l2c_save,
  2281. + .outer_cache = {
  2282. + .inv_range = l2c220_inv_range,
  2283. + .clean_range = l2c220_clean_range,
  2284. + .flush_range = l2c220_flush_range,
  2285. + .flush_all = l2c220_flush_all,
  2286. + .disable = l2c_disable,
  2287. + .sync = l2c220_sync,
  2288. + .resume = l2c210_resume,
  2289. + },
  2290. +};
  2291. +
  2292. +/*
  2293. + * L2C-310 specific code.
  2294. + *
  2295. + * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
  2296. + * and the way operations are all background tasks. However, issuing an
  2297. + * operation while a background operation is in progress results in a
  2298. + * SLVERR response. We can reuse:
  2299. + *
  2300. + * __l2c210_cache_sync (using sync_reg_offset)
  2301. + * l2c210_sync
  2302. + * l2c210_inv_range (if 588369 is not applicable)
  2303. + * l2c210_clean_range
  2304. + * l2c210_flush_range (if 588369 is not applicable)
  2305. + * l2c210_flush_all (if 727915 is not applicable)
  2306. + *
  2307. + * Errata:
  2308. + * 588369: PL310 R0P0->R1P0, fixed R2P0.
  2309. + * Affects: all clean+invalidate operations
  2310. + * clean and invalidate skips the invalidate step, so we need to issue
  2311. + * separate operations. We also require the above debug workaround
  2312. + * enclosing this code fragment on affected parts. On unaffected parts,
  2313. + * we must not use this workaround without the debug register writes
  2314. + * to avoid exposing a problem similar to 727915.
  2315. + *
  2316. + * 727915: PL310 R2P0->R3P0, fixed R3P1.
  2317. + * Affects: clean+invalidate by way
  2318. + * clean and invalidate by way runs in the background, and a store can
  2319. + * hit the line between the clean operation and invalidate operation,
  2320. + * resulting in the store being lost.
  2321. + *
  2322. + * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
  2323. + * Affects: 8x64-bit (double fill) line fetches
  2324. + * double fill line fetches can fail to cause dirty data to be evicted
  2325. + * from the cache before the new data overwrites the second line.
  2326. + *
  2327. + * 753970: PL310 R3P0, fixed R3P1.
  2328. + * Affects: sync
  2329. + * prevents merging writes after the sync operation, until another L2C
  2330. + * operation is performed (or a number of other conditions.)
  2331. + *
  2332. + * 769419: PL310 R0P0->R3P1, fixed R3P2.
  2333. + * Affects: store buffer
  2334. + * store buffer is not automatically drained.
  2335. + */
  2336. +static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
  2337. +{
  2338. + void __iomem *base = l2x0_base;
  2339. +
  2340. + if ((start | end) & (CACHE_LINE_SIZE - 1)) {
  2341. + unsigned long flags;
  2342. +
  2343. + /* Erratum 588369 for both clean+invalidate operations */
  2344. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2345. + l2c_set_debug(base, 0x03);
  2346. +
  2347. + if (start & (CACHE_LINE_SIZE - 1)) {
  2348. + start &= ~(CACHE_LINE_SIZE - 1);
  2349. + writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
  2350. + writel_relaxed(start, base + L2X0_INV_LINE_PA);
  2351. + start += CACHE_LINE_SIZE;
  2352. + }
  2353. +
  2354. + if (end & (CACHE_LINE_SIZE - 1)) {
  2355. + end &= ~(CACHE_LINE_SIZE - 1);
  2356. + writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
  2357. + writel_relaxed(end, base + L2X0_INV_LINE_PA);
  2358. + }
  2359. +
  2360. + l2c_set_debug(base, 0x00);
  2361. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2362. + }
  2363. +
  2364. + __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
  2365. + __l2c210_cache_sync(base);
  2366. +}
  2367. +
  2368. +static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
  2369. +{
  2370. + raw_spinlock_t *lock = &l2x0_lock;
  2371. + unsigned long flags;
  2372. + void __iomem *base = l2x0_base;
  2373. +
  2374. + raw_spin_lock_irqsave(lock, flags);
  2375. while (start < end) {
  2376. unsigned long blk_end = start + min(end - start, 4096UL);
  2377. - debug_writel(0x03);
  2378. + l2c_set_debug(base, 0x03);
  2379. while (start < blk_end) {
  2380. - l2x0_flush_line(start);
  2381. + writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
  2382. + writel_relaxed(start, base + L2X0_INV_LINE_PA);
  2383. start += CACHE_LINE_SIZE;
  2384. }
  2385. - debug_writel(0x00);
  2386. + l2c_set_debug(base, 0x00);
  2387. if (blk_end < end) {
  2388. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2389. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2390. + raw_spin_unlock_irqrestore(lock, flags);
  2391. + raw_spin_lock_irqsave(lock, flags);
  2392. }
  2393. }
  2394. - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2395. - cache_sync();
  2396. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2397. + raw_spin_unlock_irqrestore(lock, flags);
  2398. + __l2c210_cache_sync(base);
  2399. }
  2400. -static void l2x0_disable(void)
  2401. +static void l2c310_flush_all_erratum(void)
  2402. {
  2403. + void __iomem *base = l2x0_base;
  2404. unsigned long flags;
  2405. raw_spin_lock_irqsave(&l2x0_lock, flags);
  2406. - __l2x0_flush_all();
  2407. - writel_relaxed(0, l2x0_base + L2X0_CTRL);
  2408. - dsb(st);
  2409. + l2c_set_debug(base, 0x03);
  2410. + __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  2411. + l2c_set_debug(base, 0x00);
  2412. + __l2c210_cache_sync(base);
  2413. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2414. }
  2415. -static void l2x0_unlock(u32 cache_id)
  2416. +static void __init l2c310_save(void __iomem *base)
  2417. {
  2418. - int lockregs;
  2419. - int i;
  2420. + unsigned revision;
  2421. - switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  2422. - case L2X0_CACHE_ID_PART_L310:
  2423. - lockregs = 8;
  2424. - break;
  2425. - case AURORA_CACHE_ID:
  2426. - lockregs = 4;
  2427. + l2c_save(base);
  2428. +
  2429. + l2x0_saved_regs.tag_latency = readl_relaxed(base +
  2430. + L310_TAG_LATENCY_CTRL);
  2431. + l2x0_saved_regs.data_latency = readl_relaxed(base +
  2432. + L310_DATA_LATENCY_CTRL);
  2433. + l2x0_saved_regs.filter_end = readl_relaxed(base +
  2434. + L310_ADDR_FILTER_END);
  2435. + l2x0_saved_regs.filter_start = readl_relaxed(base +
  2436. + L310_ADDR_FILTER_START);
  2437. +
  2438. + revision = readl_relaxed(base + L2X0_CACHE_ID) &
  2439. + L2X0_CACHE_ID_RTL_MASK;
  2440. +
2441. + /* From r2p0, there is a Prefetch offset/control register */
  2442. + if (revision >= L310_CACHE_ID_RTL_R2P0)
  2443. + l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
  2444. + L310_PREFETCH_CTRL);
  2445. +
2446. + /* From r3p0, there is a Power control register */
  2447. + if (revision >= L310_CACHE_ID_RTL_R3P0)
  2448. + l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
  2449. + L310_POWER_CTRL);
  2450. +}
  2451. +
  2452. +static void l2c310_resume(void)
  2453. +{
  2454. + void __iomem *base = l2x0_base;
  2455. +
  2456. + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  2457. + unsigned revision;
  2458. +
  2459. + /* restore pl310 setup */
  2460. + writel_relaxed(l2x0_saved_regs.tag_latency,
  2461. + base + L310_TAG_LATENCY_CTRL);
  2462. + writel_relaxed(l2x0_saved_regs.data_latency,
  2463. + base + L310_DATA_LATENCY_CTRL);
  2464. + writel_relaxed(l2x0_saved_regs.filter_end,
  2465. + base + L310_ADDR_FILTER_END);
  2466. + writel_relaxed(l2x0_saved_regs.filter_start,
  2467. + base + L310_ADDR_FILTER_START);
  2468. +
  2469. + revision = readl_relaxed(base + L2X0_CACHE_ID) &
  2470. + L2X0_CACHE_ID_RTL_MASK;
  2471. +
  2472. + if (revision >= L310_CACHE_ID_RTL_R2P0)
  2473. + l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
  2474. + L310_PREFETCH_CTRL);
  2475. + if (revision >= L310_CACHE_ID_RTL_R3P0)
  2476. + l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
  2477. + L310_POWER_CTRL);
  2478. +
  2479. + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
  2480. +
  2481. + /* Re-enable full-line-of-zeros for Cortex-A9 */
  2482. + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
  2483. + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  2484. + }
  2485. +}
  2486. +
  2487. +static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
  2488. +{
  2489. + switch (act & ~CPU_TASKS_FROZEN) {
  2490. + case CPU_STARTING:
  2491. + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  2492. break;
  2493. - default:
  2494. - /* L210 and unknown types */
  2495. - lockregs = 1;
  2496. + case CPU_DYING:
  2497. + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
  2498. break;
  2499. }
  2500. + return NOTIFY_OK;
  2501. +}
  2502. - for (i = 0; i < lockregs; i++) {
  2503. - writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
  2504. - i * L2X0_LOCKDOWN_STRIDE);
  2505. - writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
  2506. - i * L2X0_LOCKDOWN_STRIDE);
  2507. +static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
  2508. +{
  2509. + unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
  2510. + bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
  2511. +
  2512. + if (rev >= L310_CACHE_ID_RTL_R2P0) {
  2513. + if (cortex_a9) {
  2514. + aux |= L310_AUX_CTRL_EARLY_BRESP;
  2515. + pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
  2516. + } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
  2517. + pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
  2518. + aux &= ~L310_AUX_CTRL_EARLY_BRESP;
  2519. + }
  2520. + }
  2521. +
  2522. + if (cortex_a9) {
  2523. + u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
  2524. + u32 acr = get_auxcr();
  2525. +
  2526. + pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
  2527. +
  2528. + if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
  2529. + pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
  2530. +
  2531. + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
  2532. + pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
  2533. +
  2534. + if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
  2535. + aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
  2536. + pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
  2537. + }
  2538. + } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
  2539. + pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
  2540. + aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
  2541. + }
  2542. +
  2543. + if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
  2544. + u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
  2545. +
  2546. + pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
  2547. + aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
  2548. + aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
  2549. + 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
  2550. + }
  2551. +
  2552. + /* r3p0 or later has power control register */
  2553. + if (rev >= L310_CACHE_ID_RTL_R3P0) {
  2554. + u32 power_ctrl;
  2555. +
  2556. + l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
  2557. + base, L310_POWER_CTRL);
  2558. + power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
  2559. + pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
  2560. + power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
  2561. + power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
  2562. + }
  2563. +
  2564. + /*
  2565. + * Always enable non-secure access to the lockdown registers -
  2566. + * we write to them as part of the L2C enable sequence so they
  2567. + * need to be accessible.
  2568. + */
  2569. + aux |= L310_AUX_CTRL_NS_LOCKDOWN;
  2570. +
  2571. + l2c_enable(base, aux, num_lock);
  2572. +
  2573. + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
  2574. + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  2575. + cpu_notifier(l2c310_cpu_enable_flz, 0);
  2576. }
  2577. }
  2578. -void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
  2579. +static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  2580. + struct outer_cache_fns *fns)
  2581. {
  2582. - u32 aux;
  2583. - u32 cache_id;
  2584. - u32 way_size = 0;
  2585. - int ways;
  2586. - int way_size_shift = L2X0_WAY_SIZE_SHIFT;
  2587. - const char *type;
  2588. + unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
  2589. + const char *errata[8];
  2590. + unsigned n = 0;
  2591. - l2x0_base = base;
  2592. - if (cache_id_part_number_from_dt)
  2593. - cache_id = cache_id_part_number_from_dt;
  2594. - else
  2595. - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
  2596. - aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2597. + if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
  2598. + revision < L310_CACHE_ID_RTL_R2P0 &&
  2599. + /* For bcm compatibility */
  2600. + fns->inv_range == l2c210_inv_range) {
  2601. + fns->inv_range = l2c310_inv_range_erratum;
  2602. + fns->flush_range = l2c310_flush_range_erratum;
  2603. + errata[n++] = "588369";
  2604. + }
  2605. +
  2606. + if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
  2607. + revision >= L310_CACHE_ID_RTL_R2P0 &&
  2608. + revision < L310_CACHE_ID_RTL_R3P1) {
  2609. + fns->flush_all = l2c310_flush_all_erratum;
  2610. + errata[n++] = "727915";
  2611. + }
  2612. +
  2613. + if (revision >= L310_CACHE_ID_RTL_R3P0 &&
  2614. + revision < L310_CACHE_ID_RTL_R3P2) {
  2615. + u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
  2616. + /* I don't think bit23 is required here... but iMX6 does so */
  2617. + if (val & (BIT(30) | BIT(23))) {
  2618. + val &= ~(BIT(30) | BIT(23));
  2619. + l2c_write_sec(val, base, L310_PREFETCH_CTRL);
  2620. + errata[n++] = "752271";
  2621. + }
  2622. + }
  2623. +
  2624. + if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
  2625. + revision == L310_CACHE_ID_RTL_R3P0) {
  2626. + sync_reg_offset = L2X0_DUMMY_REG;
  2627. + errata[n++] = "753970";
  2628. + }
  2629. +
  2630. + if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
  2631. + errata[n++] = "769419";
  2632. +
  2633. + if (n) {
  2634. + unsigned i;
  2635. +
  2636. + pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  2637. + for (i = 0; i < n; i++)
  2638. + pr_cont(" %s", errata[i]);
  2639. + pr_cont(" enabled\n");
  2640. + }
  2641. +}
  2642. +
  2643. +static void l2c310_disable(void)
  2644. +{
  2645. + /*
  2646. + * If full-line-of-zeros is enabled, we must first disable it in the
  2647. + * Cortex-A9 auxiliary control register before disabling the L2 cache.
  2648. + */
  2649. + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
  2650. + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
  2651. + l2c_disable();
  2652. +}
  2653. +
  2654. +static const struct l2c_init_data l2c310_init_fns __initconst = {
  2655. + .type = "L2C-310",
  2656. + .way_size_0 = SZ_8K,
  2657. + .num_lock = 8,
  2658. + .enable = l2c310_enable,
  2659. + .fixup = l2c310_fixup,
  2660. + .save = l2c310_save,
  2661. + .outer_cache = {
  2662. + .inv_range = l2c210_inv_range,
  2663. + .clean_range = l2c210_clean_range,
  2664. + .flush_range = l2c210_flush_range,
  2665. + .flush_all = l2c210_flush_all,
  2666. + .disable = l2c310_disable,
  2667. + .sync = l2c210_sync,
  2668. + .resume = l2c310_resume,
  2669. + },
  2670. +};
  2671. +
  2672. +static void __init __l2c_init(const struct l2c_init_data *data,
  2673. + u32 aux_val, u32 aux_mask, u32 cache_id)
  2674. +{
  2675. + struct outer_cache_fns fns;
  2676. + unsigned way_size_bits, ways;
  2677. + u32 aux, old_aux;
  2678. +
  2679. + /*
  2680. + * Sanity check the aux values. aux_mask is the bits we preserve
  2681. + * from reading the hardware register, and aux_val is the bits we
  2682. + * set.
  2683. + */
  2684. + if (aux_val & aux_mask)
  2685. + pr_alert("L2C: platform provided aux values permit register corruption.\n");
  2686. +
  2687. + old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2688. aux &= aux_mask;
  2689. aux |= aux_val;
  2690. + if (old_aux != aux)
  2691. + pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
  2692. + old_aux, aux);
  2693. +
  2694. /* Determine the number of ways */
  2695. switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  2696. case L2X0_CACHE_ID_PART_L310:
  2697. + if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
  2698. + pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
  2699. if (aux & (1 << 16))
  2700. ways = 16;
  2701. else
  2702. ways = 8;
  2703. - type = "L310";
  2704. -#ifdef CONFIG_PL310_ERRATA_753970
  2705. - /* Unmapped register. */
  2706. - sync_reg_offset = L2X0_DUMMY_REG;
  2707. -#endif
  2708. - if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
  2709. - outer_cache.set_debug = pl310_set_debug;
  2710. break;
  2711. +
  2712. case L2X0_CACHE_ID_PART_L210:
  2713. + case L2X0_CACHE_ID_PART_L220:
  2714. ways = (aux >> 13) & 0xf;
  2715. - type = "L210";
  2716. break;
  2717. case AURORA_CACHE_ID:
  2718. - sync_reg_offset = AURORA_SYNC_REG;
  2719. ways = (aux >> 13) & 0xf;
  2720. ways = 2 << ((ways + 1) >> 2);
  2721. - way_size_shift = AURORA_WAY_SIZE_SHIFT;
  2722. - type = "Aurora";
  2723. break;
  2724. +
  2725. default:
  2726. /* Assume unknown chips have 8 ways */
  2727. ways = 8;
  2728. - type = "L2x0 series";
  2729. break;
  2730. }
  2731. l2x0_way_mask = (1 << ways) - 1;
  2732. /*
  2733. - * L2 cache Size = Way size * Number of ways
  2734. + * way_size_0 is the size that a way_size value of zero would be
  2735. + * given the calculation: way_size = way_size_0 << way_size_bits.
  2736. + * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
  2737. + * then way_size_0 would be 8k.
  2738. + *
  2739. + * L2 cache size = number of ways * way size.
  2740. + */
  2741. + way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
  2742. + L2C_AUX_CTRL_WAY_SIZE_SHIFT;
  2743. + l2x0_size = ways * (data->way_size_0 << way_size_bits);
  2744. +
  2745. + fns = data->outer_cache;
  2746. + fns.write_sec = outer_cache.write_sec;
  2747. + if (data->fixup)
  2748. + data->fixup(l2x0_base, cache_id, &fns);
  2749. +
  2750. + /*
  2751. + * Check if l2x0 controller is already enabled. If we are booting
  2752. + * in non-secure mode accessing the below registers will fault.
  2753. */
  2754. - way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
  2755. - way_size = 1 << (way_size + way_size_shift);
  2756. + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
  2757. + data->enable(l2x0_base, aux, data->num_lock);
  2758. - l2x0_size = ways * way_size * SZ_1K;
  2759. + outer_cache = fns;
  2760. /*
  2761. - * Check if l2x0 controller is already enabled.
  2762. - * If you are booting from non-secure mode
  2763. - * accessing the below registers will fault.
  2764. + * It is strange to save the register state before initialisation,
  2765. + * but hey, this is what the DT implementations decided to do.
  2766. */
  2767. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  2768. - /* Make sure that I&D is not locked down when starting */
  2769. - l2x0_unlock(cache_id);
  2770. + if (data->save)
  2771. + data->save(l2x0_base);
  2772. +
  2773. + /* Re-read it in case some bits are reserved. */
  2774. + aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2775. +
  2776. + pr_info("%s cache controller enabled, %d ways, %d kB\n",
  2777. + data->type, ways, l2x0_size >> 10);
  2778. + pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
  2779. + data->type, cache_id, aux);
  2780. +}
  2781. - /* l2x0 controller is disabled */
  2782. - writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
  2783. +void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
  2784. +{
  2785. + const struct l2c_init_data *data;
  2786. + u32 cache_id;
  2787. - l2x0_inv_all();
  2788. + l2x0_base = base;
  2789. - /* enable L2X0 */
  2790. - writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
  2791. + cache_id = readl_relaxed(base + L2X0_CACHE_ID);
  2792. +
  2793. + switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  2794. + default:
  2795. + case L2X0_CACHE_ID_PART_L210:
  2796. + data = &l2c210_data;
  2797. + break;
  2798. +
  2799. + case L2X0_CACHE_ID_PART_L220:
  2800. + data = &l2c220_data;
  2801. + break;
  2802. +
  2803. + case L2X0_CACHE_ID_PART_L310:
  2804. + data = &l2c310_init_fns;
  2805. + break;
  2806. }
  2807. - /* Re-read it in case some bits are reserved. */
  2808. - aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2809. + __l2c_init(data, aux_val, aux_mask, cache_id);
  2810. +}
  2811. +
  2812. +#ifdef CONFIG_OF
  2813. +static int l2_wt_override;
  2814. +
2815. +/* Aurora doesn't have the cache ID register available, so we have to
2816. + * pass it through the device tree */
  2817. +static u32 cache_id_part_number_from_dt;
  2818. +
  2819. +static void __init l2x0_of_parse(const struct device_node *np,
  2820. + u32 *aux_val, u32 *aux_mask)
  2821. +{
  2822. + u32 data[2] = { 0, 0 };
  2823. + u32 tag = 0;
  2824. + u32 dirty = 0;
  2825. + u32 val = 0, mask = 0;
  2826. +
  2827. + of_property_read_u32(np, "arm,tag-latency", &tag);
  2828. + if (tag) {
  2829. + mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
  2830. + val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
  2831. + }
  2832. +
  2833. + of_property_read_u32_array(np, "arm,data-latency",
  2834. + data, ARRAY_SIZE(data));
  2835. + if (data[0] && data[1]) {
  2836. + mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
  2837. + L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
  2838. + val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
  2839. + ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
  2840. + }
  2841. +
  2842. + of_property_read_u32(np, "arm,dirty-latency", &dirty);
  2843. + if (dirty) {
  2844. + mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
  2845. + val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
  2846. + }
  2847. - /* Save the value for resuming. */
  2848. - l2x0_saved_regs.aux_ctrl = aux;
  2849. + *aux_val &= ~mask;
  2850. + *aux_val |= val;
  2851. + *aux_mask &= ~mask;
  2852. +}
  2853. +
  2854. +static const struct l2c_init_data of_l2c210_data __initconst = {
  2855. + .type = "L2C-210",
  2856. + .way_size_0 = SZ_8K,
  2857. + .num_lock = 1,
  2858. + .of_parse = l2x0_of_parse,
  2859. + .enable = l2c_enable,
  2860. + .save = l2c_save,
  2861. + .outer_cache = {
  2862. + .inv_range = l2c210_inv_range,
  2863. + .clean_range = l2c210_clean_range,
  2864. + .flush_range = l2c210_flush_range,
  2865. + .flush_all = l2c210_flush_all,
  2866. + .disable = l2c_disable,
  2867. + .sync = l2c210_sync,
  2868. + .resume = l2c210_resume,
  2869. + },
  2870. +};
  2871. +
  2872. +static const struct l2c_init_data of_l2c220_data __initconst = {
  2873. + .type = "L2C-220",
  2874. + .way_size_0 = SZ_8K,
  2875. + .num_lock = 1,
  2876. + .of_parse = l2x0_of_parse,
  2877. + .enable = l2c220_enable,
  2878. + .save = l2c_save,
  2879. + .outer_cache = {
  2880. + .inv_range = l2c220_inv_range,
  2881. + .clean_range = l2c220_clean_range,
  2882. + .flush_range = l2c220_flush_range,
  2883. + .flush_all = l2c220_flush_all,
  2884. + .disable = l2c_disable,
  2885. + .sync = l2c220_sync,
  2886. + .resume = l2c210_resume,
  2887. + },
  2888. +};
  2889. +
  2890. +static void __init l2c310_of_parse(const struct device_node *np,
  2891. + u32 *aux_val, u32 *aux_mask)
  2892. +{
  2893. + u32 data[3] = { 0, 0, 0 };
  2894. + u32 tag[3] = { 0, 0, 0 };
  2895. + u32 filter[2] = { 0, 0 };
  2896. +
  2897. + of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
  2898. + if (tag[0] && tag[1] && tag[2])
  2899. + writel_relaxed(
  2900. + L310_LATENCY_CTRL_RD(tag[0] - 1) |
  2901. + L310_LATENCY_CTRL_WR(tag[1] - 1) |
  2902. + L310_LATENCY_CTRL_SETUP(tag[2] - 1),
  2903. + l2x0_base + L310_TAG_LATENCY_CTRL);
  2904. +
  2905. + of_property_read_u32_array(np, "arm,data-latency",
  2906. + data, ARRAY_SIZE(data));
  2907. + if (data[0] && data[1] && data[2])
  2908. + writel_relaxed(
  2909. + L310_LATENCY_CTRL_RD(data[0] - 1) |
  2910. + L310_LATENCY_CTRL_WR(data[1] - 1) |
  2911. + L310_LATENCY_CTRL_SETUP(data[2] - 1),
  2912. + l2x0_base + L310_DATA_LATENCY_CTRL);
  2913. - if (!of_init) {
  2914. - outer_cache.inv_range = l2x0_inv_range;
  2915. - outer_cache.clean_range = l2x0_clean_range;
  2916. - outer_cache.flush_range = l2x0_flush_range;
  2917. - outer_cache.sync = l2x0_cache_sync;
  2918. - outer_cache.flush_all = l2x0_flush_all;
  2919. - outer_cache.inv_all = l2x0_inv_all;
  2920. - outer_cache.disable = l2x0_disable;
  2921. - }
  2922. -
  2923. - pr_info("%s cache controller enabled\n", type);
  2924. - pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
  2925. - ways, cache_id, aux, l2x0_size >> 10);
  2926. + of_property_read_u32_array(np, "arm,filter-ranges",
  2927. + filter, ARRAY_SIZE(filter));
  2928. + if (filter[1]) {
  2929. + writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
  2930. + l2x0_base + L310_ADDR_FILTER_END);
  2931. + writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
  2932. + l2x0_base + L310_ADDR_FILTER_START);
  2933. + }
  2934. }
  2935. -#ifdef CONFIG_OF
  2936. -static int l2_wt_override;
  2937. +static const struct l2c_init_data of_l2c310_data __initconst = {
  2938. + .type = "L2C-310",
  2939. + .way_size_0 = SZ_8K,
  2940. + .num_lock = 8,
  2941. + .of_parse = l2c310_of_parse,
  2942. + .enable = l2c310_enable,
  2943. + .fixup = l2c310_fixup,
  2944. + .save = l2c310_save,
  2945. + .outer_cache = {
  2946. + .inv_range = l2c210_inv_range,
  2947. + .clean_range = l2c210_clean_range,
  2948. + .flush_range = l2c210_flush_range,
  2949. + .flush_all = l2c210_flush_all,
  2950. + .disable = l2c310_disable,
  2951. + .sync = l2c210_sync,
  2952. + .resume = l2c310_resume,
  2953. + },
  2954. +};
  2955. /*
  2956. * Note that the end addresses passed to Linux primitives are
  2957. @@ -524,6 +1166,100 @@
  2958. }
  2959. }
  2960. +static void aurora_save(void __iomem *base)
  2961. +{
  2962. + l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
  2963. + l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
  2964. +}
  2965. +
  2966. +static void aurora_resume(void)
  2967. +{
  2968. + void __iomem *base = l2x0_base;
  2969. +
  2970. + if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  2971. + writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
  2972. + writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
  2973. + }
  2974. +}
  2975. +
  2976. +/*
2977. + * For Aurora cache in no outer mode, enable broadcasting of cache
2978. + * commands to L2 via the CP15 coprocessor.
  2979. + */
  2980. +static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
  2981. + unsigned num_lock)
  2982. +{
  2983. + u32 u;
  2984. +
  2985. + asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
  2986. + u |= AURORA_CTRL_FW; /* Set the FW bit */
  2987. + asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
  2988. +
  2989. + isb();
  2990. +
  2991. + l2c_enable(base, aux, num_lock);
  2992. +}
  2993. +
  2994. +static void __init aurora_fixup(void __iomem *base, u32 cache_id,
  2995. + struct outer_cache_fns *fns)
  2996. +{
  2997. + sync_reg_offset = AURORA_SYNC_REG;
  2998. +}
  2999. +
  3000. +static void __init aurora_of_parse(const struct device_node *np,
  3001. + u32 *aux_val, u32 *aux_mask)
  3002. +{
  3003. + u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
  3004. + u32 mask = AURORA_ACR_REPLACEMENT_MASK;
  3005. +
  3006. + of_property_read_u32(np, "cache-id-part",
  3007. + &cache_id_part_number_from_dt);
  3008. +
  3009. + /* Determine and save the write policy */
  3010. + l2_wt_override = of_property_read_bool(np, "wt-override");
  3011. +
  3012. + if (l2_wt_override) {
  3013. + val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
  3014. + mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
  3015. + }
  3016. +
  3017. + *aux_val &= ~mask;
  3018. + *aux_val |= val;
  3019. + *aux_mask &= ~mask;
  3020. +}
  3021. +
  3022. +static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
  3023. + .type = "Aurora",
  3024. + .way_size_0 = SZ_4K,
  3025. + .num_lock = 4,
  3026. + .of_parse = aurora_of_parse,
  3027. + .enable = l2c_enable,
  3028. + .fixup = aurora_fixup,
  3029. + .save = aurora_save,
  3030. + .outer_cache = {
  3031. + .inv_range = aurora_inv_range,
  3032. + .clean_range = aurora_clean_range,
  3033. + .flush_range = aurora_flush_range,
  3034. + .flush_all = l2x0_flush_all,
  3035. + .disable = l2x0_disable,
  3036. + .sync = l2x0_cache_sync,
  3037. + .resume = aurora_resume,
  3038. + },
  3039. +};
  3040. +
  3041. +static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
  3042. + .type = "Aurora",
  3043. + .way_size_0 = SZ_4K,
  3044. + .num_lock = 4,
  3045. + .of_parse = aurora_of_parse,
  3046. + .enable = aurora_enable_no_outer,
  3047. + .fixup = aurora_fixup,
  3048. + .save = aurora_save,
  3049. + .outer_cache = {
  3050. + .resume = aurora_resume,
  3051. + },
  3052. +};
  3053. +
  3054. /*
  3055. * For certain Broadcom SoCs, depending on the address range, different offsets
  3056. * need to be added to the address before passing it to L2 for
  3057. @@ -588,16 +1324,16 @@
  3058. /* normal case, no cross section between start and end */
  3059. if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
  3060. - l2x0_inv_range(new_start, new_end);
  3061. + l2c210_inv_range(new_start, new_end);
  3062. return;
  3063. }
  3064. /* They cross sections, so it can only be a cross from section
  3065. * 2 to section 3
  3066. */
  3067. - l2x0_inv_range(new_start,
  3068. + l2c210_inv_range(new_start,
  3069. bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
  3070. - l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3071. + l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3072. new_end);
  3073. }
  3074. @@ -610,26 +1346,21 @@
  3075. if (unlikely(end <= start))
  3076. return;
  3077. - if ((end - start) >= l2x0_size) {
  3078. - l2x0_clean_all();
  3079. - return;
  3080. - }
  3081. -
  3082. new_start = bcm_l2_phys_addr(start);
  3083. new_end = bcm_l2_phys_addr(end);
  3084. /* normal case, no cross section between start and end */
  3085. if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
  3086. - l2x0_clean_range(new_start, new_end);
  3087. + l2c210_clean_range(new_start, new_end);
  3088. return;
  3089. }
  3090. /* They cross sections, so it can only be a cross from section
  3091. * 2 to section 3
  3092. */
  3093. - l2x0_clean_range(new_start,
  3094. + l2c210_clean_range(new_start,
  3095. bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
  3096. - l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3097. + l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3098. new_end);
  3099. }
  3100. @@ -643,7 +1374,7 @@
  3101. return;
  3102. if ((end - start) >= l2x0_size) {
  3103. - l2x0_flush_all();
  3104. + outer_cache.flush_all();
  3105. return;
  3106. }
  3107. @@ -652,283 +1383,67 @@
  3108. /* normal case, no cross section between start and end */
  3109. if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
  3110. - l2x0_flush_range(new_start, new_end);
  3111. + l2c210_flush_range(new_start, new_end);
  3112. return;
  3113. }
  3114. /* They cross sections, so it can only be a cross from section
  3115. * 2 to section 3
  3116. */
  3117. - l2x0_flush_range(new_start,
  3118. + l2c210_flush_range(new_start,
  3119. bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
  3120. - l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3121. + l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3122. new_end);
  3123. }
  3124. -static void __init l2x0_of_setup(const struct device_node *np,
  3125. - u32 *aux_val, u32 *aux_mask)
  3126. -{
  3127. - u32 data[2] = { 0, 0 };
  3128. - u32 tag = 0;
  3129. - u32 dirty = 0;
  3130. - u32 val = 0, mask = 0;
  3131. -
  3132. - of_property_read_u32(np, "arm,tag-latency", &tag);
  3133. - if (tag) {
  3134. - mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
  3135. - val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
  3136. - }
  3137. -
  3138. - of_property_read_u32_array(np, "arm,data-latency",
  3139. - data, ARRAY_SIZE(data));
  3140. - if (data[0] && data[1]) {
  3141. - mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
  3142. - L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
  3143. - val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
  3144. - ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
  3145. - }
  3146. -
  3147. - of_property_read_u32(np, "arm,dirty-latency", &dirty);
  3148. - if (dirty) {
  3149. - mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
  3150. - val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
  3151. - }
  3152. -
  3153. - *aux_val &= ~mask;
  3154. - *aux_val |= val;
  3155. - *aux_mask &= ~mask;
  3156. -}
  3157. -
  3158. -static void __init pl310_of_setup(const struct device_node *np,
  3159. - u32 *aux_val, u32 *aux_mask)
  3160. -{
  3161. - u32 data[3] = { 0, 0, 0 };
  3162. - u32 tag[3] = { 0, 0, 0 };
  3163. - u32 filter[2] = { 0, 0 };
  3164. -
  3165. - of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
  3166. - if (tag[0] && tag[1] && tag[2])
  3167. - writel_relaxed(
  3168. - ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
  3169. - ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
  3170. - ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
  3171. - l2x0_base + L2X0_TAG_LATENCY_CTRL);
  3172. -
  3173. - of_property_read_u32_array(np, "arm,data-latency",
  3174. - data, ARRAY_SIZE(data));
  3175. - if (data[0] && data[1] && data[2])
  3176. - writel_relaxed(
  3177. - ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
  3178. - ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
  3179. - ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
  3180. - l2x0_base + L2X0_DATA_LATENCY_CTRL);
  3181. -
  3182. - of_property_read_u32_array(np, "arm,filter-ranges",
  3183. - filter, ARRAY_SIZE(filter));
  3184. - if (filter[1]) {
  3185. - writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
  3186. - l2x0_base + L2X0_ADDR_FILTER_END);
  3187. - writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
  3188. - l2x0_base + L2X0_ADDR_FILTER_START);
  3189. - }
  3190. -}
  3191. -
  3192. -static void __init pl310_save(void)
  3193. -{
  3194. - u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
  3195. - L2X0_CACHE_ID_RTL_MASK;
  3196. -
  3197. - l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
  3198. - L2X0_TAG_LATENCY_CTRL);
  3199. - l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
  3200. - L2X0_DATA_LATENCY_CTRL);
  3201. - l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
  3202. - L2X0_ADDR_FILTER_END);
  3203. - l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
  3204. - L2X0_ADDR_FILTER_START);
  3205. -
  3206. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
  3207. - /*
  3208. - * From r2p0, there is Prefetch offset/control register
  3209. - */
  3210. - l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
  3211. - L2X0_PREFETCH_CTRL);
  3212. - /*
  3213. - * From r3p0, there is Power control register
  3214. - */
  3215. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
  3216. - l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
  3217. - L2X0_POWER_CTRL);
  3218. - }
  3219. -}
3220. +/* Broadcom L2C-310 controllers are based on ARM's r3p2 or later and require no fixups */
  3221. +static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
  3222. + .type = "BCM-L2C-310",
  3223. + .way_size_0 = SZ_8K,
  3224. + .num_lock = 8,
  3225. + .of_parse = l2c310_of_parse,
  3226. + .enable = l2c310_enable,
  3227. + .save = l2c310_save,
  3228. + .outer_cache = {
  3229. + .inv_range = bcm_inv_range,
  3230. + .clean_range = bcm_clean_range,
  3231. + .flush_range = bcm_flush_range,
  3232. + .flush_all = l2c210_flush_all,
  3233. + .disable = l2c310_disable,
  3234. + .sync = l2c210_sync,
  3235. + .resume = l2c310_resume,
  3236. + },
  3237. +};
  3238. -static void aurora_save(void)
  3239. +static void __init tauros3_save(void __iomem *base)
  3240. {
  3241. - l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
  3242. - l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  3243. -}
  3244. + l2c_save(base);
  3245. -static void __init tauros3_save(void)
  3246. -{
  3247. l2x0_saved_regs.aux2_ctrl =
  3248. - readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
  3249. + readl_relaxed(base + TAUROS3_AUX2_CTRL);
  3250. l2x0_saved_regs.prefetch_ctrl =
  3251. - readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
  3252. -}
  3253. -
  3254. -static void l2x0_resume(void)
  3255. -{
  3256. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3257. - /* restore aux ctrl and enable l2 */
  3258. - l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
  3259. -
  3260. - writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
  3261. - L2X0_AUX_CTRL);
  3262. -
  3263. - l2x0_inv_all();
  3264. -
  3265. - writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
  3266. - }
  3267. -}
  3268. -
  3269. -static void pl310_resume(void)
  3270. -{
  3271. - u32 l2x0_revision;
  3272. -
  3273. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3274. - /* restore pl310 setup */
  3275. - writel_relaxed(l2x0_saved_regs.tag_latency,
  3276. - l2x0_base + L2X0_TAG_LATENCY_CTRL);
  3277. - writel_relaxed(l2x0_saved_regs.data_latency,
  3278. - l2x0_base + L2X0_DATA_LATENCY_CTRL);
  3279. - writel_relaxed(l2x0_saved_regs.filter_end,
  3280. - l2x0_base + L2X0_ADDR_FILTER_END);
  3281. - writel_relaxed(l2x0_saved_regs.filter_start,
  3282. - l2x0_base + L2X0_ADDR_FILTER_START);
  3283. -
  3284. - l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
  3285. - L2X0_CACHE_ID_RTL_MASK;
  3286. -
  3287. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
  3288. - writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
  3289. - l2x0_base + L2X0_PREFETCH_CTRL);
  3290. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
  3291. - writel_relaxed(l2x0_saved_regs.pwr_ctrl,
  3292. - l2x0_base + L2X0_POWER_CTRL);
  3293. - }
  3294. - }
  3295. -
  3296. - l2x0_resume();
  3297. -}
  3298. -
  3299. -static void aurora_resume(void)
  3300. -{
  3301. - if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3302. - writel_relaxed(l2x0_saved_regs.aux_ctrl,
  3303. - l2x0_base + L2X0_AUX_CTRL);
  3304. - writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
  3305. - }
  3306. + readl_relaxed(base + L310_PREFETCH_CTRL);
  3307. }
  3308. static void tauros3_resume(void)
  3309. {
  3310. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3311. + void __iomem *base = l2x0_base;
  3312. +
  3313. + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3314. writel_relaxed(l2x0_saved_regs.aux2_ctrl,
  3315. - l2x0_base + TAUROS3_AUX2_CTRL);
  3316. + base + TAUROS3_AUX2_CTRL);
  3317. writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
  3318. - l2x0_base + L2X0_PREFETCH_CTRL);
  3319. - }
  3320. -
  3321. - l2x0_resume();
  3322. -}
  3323. -
  3324. -static void __init aurora_broadcast_l2_commands(void)
  3325. -{
  3326. - __u32 u;
  3327. - /* Enable Broadcasting of cache commands to L2*/
  3328. - __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
  3329. - u |= AURORA_CTRL_FW; /* Set the FW bit */
  3330. - __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
  3331. - isb();
  3332. -}
  3333. -
  3334. -static void __init aurora_of_setup(const struct device_node *np,
  3335. - u32 *aux_val, u32 *aux_mask)
  3336. -{
  3337. - u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
  3338. - u32 mask = AURORA_ACR_REPLACEMENT_MASK;
  3339. + base + L310_PREFETCH_CTRL);
  3340. - of_property_read_u32(np, "cache-id-part",
  3341. - &cache_id_part_number_from_dt);
  3342. -
  3343. - /* Determine and save the write policy */
  3344. - l2_wt_override = of_property_read_bool(np, "wt-override");
  3345. -
  3346. - if (l2_wt_override) {
  3347. - val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
  3348. - mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
  3349. + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
  3350. }
  3351. -
  3352. - *aux_val &= ~mask;
  3353. - *aux_val |= val;
  3354. - *aux_mask &= ~mask;
  3355. }
  3356. -static const struct l2x0_of_data pl310_data = {
  3357. - .setup = pl310_of_setup,
  3358. - .save = pl310_save,
  3359. - .outer_cache = {
  3360. - .resume = pl310_resume,
  3361. - .inv_range = l2x0_inv_range,
  3362. - .clean_range = l2x0_clean_range,
  3363. - .flush_range = l2x0_flush_range,
  3364. - .sync = l2x0_cache_sync,
  3365. - .flush_all = l2x0_flush_all,
  3366. - .inv_all = l2x0_inv_all,
  3367. - .disable = l2x0_disable,
  3368. - },
  3369. -};
  3370. -
  3371. -static const struct l2x0_of_data l2x0_data = {
  3372. - .setup = l2x0_of_setup,
  3373. - .save = NULL,
  3374. - .outer_cache = {
  3375. - .resume = l2x0_resume,
  3376. - .inv_range = l2x0_inv_range,
  3377. - .clean_range = l2x0_clean_range,
  3378. - .flush_range = l2x0_flush_range,
  3379. - .sync = l2x0_cache_sync,
  3380. - .flush_all = l2x0_flush_all,
  3381. - .inv_all = l2x0_inv_all,
  3382. - .disable = l2x0_disable,
  3383. - },
  3384. -};
  3385. -
  3386. -static const struct l2x0_of_data aurora_with_outer_data = {
  3387. - .setup = aurora_of_setup,
  3388. - .save = aurora_save,
  3389. - .outer_cache = {
  3390. - .resume = aurora_resume,
  3391. - .inv_range = aurora_inv_range,
  3392. - .clean_range = aurora_clean_range,
  3393. - .flush_range = aurora_flush_range,
  3394. - .sync = l2x0_cache_sync,
  3395. - .flush_all = l2x0_flush_all,
  3396. - .inv_all = l2x0_inv_all,
  3397. - .disable = l2x0_disable,
  3398. - },
  3399. -};
  3400. -
  3401. -static const struct l2x0_of_data aurora_no_outer_data = {
  3402. - .setup = aurora_of_setup,
  3403. - .save = aurora_save,
  3404. - .outer_cache = {
  3405. - .resume = aurora_resume,
  3406. - },
  3407. -};
  3408. -
  3409. -static const struct l2x0_of_data tauros3_data = {
  3410. - .setup = NULL,
  3411. +static const struct l2c_init_data of_tauros3_data __initconst = {
  3412. + .type = "Tauros3",
  3413. + .way_size_0 = SZ_8K,
  3414. + .num_lock = 8,
  3415. + .enable = l2c_enable,
  3416. .save = tauros3_save,
  3417. /* Tauros3 broadcasts L1 cache operations to L2 */
  3418. .outer_cache = {
  3419. @@ -936,43 +1451,26 @@
  3420. },
  3421. };
  3422. -static const struct l2x0_of_data bcm_l2x0_data = {
  3423. - .setup = pl310_of_setup,
  3424. - .save = pl310_save,
  3425. - .outer_cache = {
  3426. - .resume = pl310_resume,
  3427. - .inv_range = bcm_inv_range,
  3428. - .clean_range = bcm_clean_range,
  3429. - .flush_range = bcm_flush_range,
  3430. - .sync = l2x0_cache_sync,
  3431. - .flush_all = l2x0_flush_all,
  3432. - .inv_all = l2x0_inv_all,
  3433. - .disable = l2x0_disable,
  3434. - },
  3435. -};
  3436. -
  3437. +#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
  3438. static const struct of_device_id l2x0_ids[] __initconst = {
  3439. - { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
  3440. - { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
  3441. - { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
  3442. - { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
  3443. - .data = (void *)&bcm_l2x0_data},
  3444. - { .compatible = "brcm,bcm11351-a2-pl310-cache",
  3445. - .data = (void *)&bcm_l2x0_data},
  3446. - { .compatible = "marvell,aurora-outer-cache",
  3447. - .data = (void *)&aurora_with_outer_data},
  3448. - { .compatible = "marvell,aurora-system-cache",
  3449. - .data = (void *)&aurora_no_outer_data},
  3450. - { .compatible = "marvell,tauros3-cache",
  3451. - .data = (void *)&tauros3_data },
  3452. + L2C_ID("arm,l210-cache", of_l2c210_data),
  3453. + L2C_ID("arm,l220-cache", of_l2c220_data),
  3454. + L2C_ID("arm,pl310-cache", of_l2c310_data),
  3455. + L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
  3456. + L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
  3457. + L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
  3458. + L2C_ID("marvell,tauros3-cache", of_tauros3_data),
  3459. + /* Deprecated IDs */
  3460. + L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
  3461. {}
  3462. };
  3463. int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
  3464. {
  3465. + const struct l2c_init_data *data;
  3466. struct device_node *np;
  3467. - const struct l2x0_of_data *data;
  3468. struct resource res;
  3469. + u32 cache_id, old_aux;
  3470. np = of_find_matching_node(NULL, l2x0_ids);
  3471. if (!np)
  3472. @@ -989,23 +1487,29 @@
  3473. data = of_match_node(l2x0_ids, np)->data;
  3474. - /* L2 configuration can only be changed if the cache is disabled */
  3475. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3476. - if (data->setup)
  3477. - data->setup(np, &aux_val, &aux_mask);
  3478. -
  3479. - /* For aurora cache in no outer mode select the
  3480. - * correct mode using the coprocessor*/
  3481. - if (data == &aurora_no_outer_data)
  3482. - aurora_broadcast_l2_commands();
  3483. + old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  3484. + if (old_aux != ((old_aux & aux_mask) | aux_val)) {
  3485. + pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
  3486. + old_aux, (old_aux & aux_mask) | aux_val);
  3487. + } else if (aux_mask != ~0U && aux_val != 0) {
  3488. + pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
  3489. }
  3490. - if (data->save)
  3491. - data->save();
  3492. + /* All L2 caches are unified, so this property should be specified */
  3493. + if (!of_property_read_bool(np, "cache-unified"))
  3494. + pr_err("L2C: device tree omits to specify unified cache\n");
  3495. +
  3496. + /* L2 configuration can only be changed if the cache is disabled */
  3497. + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
  3498. + if (data->of_parse)
  3499. + data->of_parse(np, &aux_val, &aux_mask);
  3500. +
  3501. + if (cache_id_part_number_from_dt)
  3502. + cache_id = cache_id_part_number_from_dt;
  3503. + else
  3504. + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
  3505. - of_init = true;
  3506. - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
  3507. - l2x0_init(l2x0_base, aux_val, aux_mask);
  3508. + __l2c_init(data, aux_val, aux_mask, cache_id);
  3509. return 0;
  3510. }
  3511. diff -Nur linux-3.15-rc1.orig/arch/arm/mm/Kconfig linux-3.15-rc1/arch/arm/mm/Kconfig
  3512. --- linux-3.15-rc1.orig/arch/arm/mm/Kconfig 2014-04-13 23:18:35.000000000 +0200
  3513. +++ linux-3.15-rc1/arch/arm/mm/Kconfig 2014-04-25 14:11:13.531375131 +0200
  3514. @@ -897,6 +897,57 @@
  3515. This option enables optimisations for the PL310 cache
  3516. controller.
  3517. +config PL310_ERRATA_588369
  3518. + bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
  3519. + depends on CACHE_L2X0
  3520. + help
  3521. + The PL310 L2 cache controller implements three types of Clean &
  3522. + Invalidate maintenance operations: by Physical Address
  3523. + (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
  3524. + They are architecturally defined to behave as the execution of a
  3525. + clean operation followed immediately by an invalidate operation,
3526. + both performed on the same memory location. This functionality
  3527. + is not correctly implemented in PL310 as clean lines are not
  3528. + invalidated as a result of these operations.
  3529. +
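As a rough illustration of the kind of workaround this option enables (a sketch only, reusing the cache_wait() helper, the l2x0_base pointer and the L2X0_*_LINE_PA offsets already used by cache-l2x0.c; the real implementation lives in that file), the broken Clean & Invalidate by PA is replaced by a separate clean followed by an invalidate:

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA, then Invalidate by PA, instead of the broken
	 * combined Clean & Invalidate by PA operation. */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#endif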
  3530. +config PL310_ERRATA_727915
  3531. + bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
  3532. + depends on CACHE_L2X0
  3533. + help
  3534. + PL310 implements the Clean & Invalidate by Way L2 cache maintenance
  3535. + operation (offset 0x7FC). This operation runs in background so that
  3536. + PL310 can handle normal accesses while it is in progress. Under very
  3537. + rare circumstances, due to this erratum, write data can be lost when
3538. + PL310 handles a cacheable write transaction during a Clean &
  3539. + Invalidate by Way operation.
  3540. +
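A comparable sketch for this erratum (again illustrative only, borrowing cache_wait_way(), cache_sync(), l2x0_way_mask and the L2X0_* offsets from the existing cache-l2x0.c): the background Clean & Invalidate by Way is bracketed by writes to the PL310 debug control register so that write-back and linefills are disabled while it runs:

#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_flush_all(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0x3, base + L2X0_DEBUG_CTRL);	/* disable WB/linefill */
	writel_relaxed(l2x0_way_mask, base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	writel_relaxed(0x0, base + L2X0_DEBUG_CTRL);	/* restore */
}
#endif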
  3541. +config PL310_ERRATA_753970
  3542. + bool "PL310 errata: cache sync operation may be faulty"
  3543. + depends on CACHE_PL310
  3544. + help
  3545. + This option enables the workaround for the 753970 PL310 (r3p0) erratum.
  3546. +
3547. + Under some conditions the effect of a cache sync operation on
3548. + the store buffer still remains when the operation completes.
3549. + This means that the store buffer is always asked to drain and
3550. + this prevents it from merging any further writes. The workaround
3551. + is to replace the normal offset of the cache sync operation (0x730)
3552. + with another offset targeting an unmapped PL310 register, 0x740.
3553. + This has the same effect as the cache sync operation: the store
3554. + buffer is drained and we wait for all buffers to be empty.
  3555. +
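The offset substitution described above can be sketched as follows (l2c310_apply_753970() is a hypothetical helper used only for illustration; sync_reg_offset, cache_wait() and the L2X0_CACHE_SYNC/L2X0_DUMMY_REG definitions are the ones cache-l2x0.c already uses):

static unsigned int sync_reg_offset = L2X0_CACHE_SYNC;		/* 0x730 */

static void __init l2c310_apply_753970(void)
{
	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970))
		sync_reg_offset = L2X0_DUMMY_REG;	/* unmapped 0x740 */
}

static void cache_sync(void)
{
	writel_relaxed(0, l2x0_base + sync_reg_offset);
	cache_wait(l2x0_base + sync_reg_offset, 1);
}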
  3556. +config PL310_ERRATA_769419
  3557. + bool "PL310 errata: no automatic Store Buffer drain"
  3558. + depends on CACHE_L2X0
  3559. + help
  3560. + On revisions of the PL310 prior to r3p2, the Store Buffer does
  3561. + not automatically drain. This can cause normal, non-cacheable
  3562. + writes to be retained when the memory system is idle, leading
  3563. + to suboptimal I/O performance for drivers using coherent DMA.
  3564. + This option adds a write barrier to the cpu_idle loop so that,
  3565. + on systems with an outer cache, the store buffer is drained
  3566. + explicitly.
  3567. +
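The barrier itself is trivial; a sketch of where it sits (arch_idle_example() is a made-up name standing in for the ARM idle path, which really calls cpu_do_idle()):

static void arch_idle_example(void)
{
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();		/* explicitly drain the store buffer before idling */
#endif
	cpu_do_idle();
}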
  3568. config CACHE_TAUROS2
  3569. bool "Enable the Tauros2 L2 cache controller"
  3570. depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
  3571. diff -Nur linux-3.15-rc1.orig/arch/arm/mm/l2c-common.c linux-3.15-rc1/arch/arm/mm/l2c-common.c
  3572. --- linux-3.15-rc1.orig/arch/arm/mm/l2c-common.c 1970-01-01 01:00:00.000000000 +0100
  3573. +++ linux-3.15-rc1/arch/arm/mm/l2c-common.c 2014-04-25 14:11:13.531375131 +0200
  3574. @@ -0,0 +1,20 @@
  3575. +/*
  3576. + * Copyright (C) 2010 ARM Ltd.
  3577. + * Written by Catalin Marinas <catalin.marinas@arm.com>
  3578. + *
  3579. + * This program is free software; you can redistribute it and/or modify
  3580. + * it under the terms of the GNU General Public License version 2 as
  3581. + * published by the Free Software Foundation.
  3582. + */
  3583. +#include <linux/bug.h>
  3584. +#include <linux/smp.h>
  3585. +#include <asm/outercache.h>
  3586. +
  3587. +void outer_disable(void)
  3588. +{
  3589. + WARN_ON(!irqs_disabled());
  3590. + WARN_ON(num_online_cpus() > 1);
  3591. +
  3592. + if (outer_cache.disable)
  3593. + outer_cache.disable();
  3594. +}
  3595. diff -Nur linux-3.15-rc1.orig/arch/arm/mm/l2c-l2x0-resume.S linux-3.15-rc1/arch/arm/mm/l2c-l2x0-resume.S
  3596. --- linux-3.15-rc1.orig/arch/arm/mm/l2c-l2x0-resume.S 1970-01-01 01:00:00.000000000 +0100
  3597. +++ linux-3.15-rc1/arch/arm/mm/l2c-l2x0-resume.S 2014-04-25 14:11:13.531375131 +0200
  3598. @@ -0,0 +1,58 @@
  3599. +/*
  3600. + * L2C-310 early resume code. This can be used by platforms to restore
  3601. + * the settings of their L2 cache controller before restoring the
  3602. + * processor state.
  3603. + *
3604. + * This code can only be used if you are running in the secure world.
  3605. + */
  3606. +#include <linux/linkage.h>
  3607. +#include <asm/hardware/cache-l2x0.h>
  3608. +
  3609. + .text
  3610. +
  3611. +ENTRY(l2c310_early_resume)
  3612. + adr r0, 1f
  3613. + ldr r2, [r0]
  3614. + add r0, r2, r0
  3615. +
  3616. + ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
  3617. + @ r1 = phys address of L2C-310 controller
  3618. + @ r2 = aux_ctrl
  3619. + @ r3 = tag_latency
  3620. + @ r4 = data_latency
  3621. + @ r5 = filter_start
  3622. + @ r6 = filter_end
  3623. + @ r7 = prefetch_ctrl
  3624. + @ r8 = pwr_ctrl
  3625. +
  3626. + @ Check that the address has been initialised
  3627. + teq r1, #0
  3628. + moveq pc, lr
  3629. +
  3630. + @ The prefetch and power control registers are revision dependent
  3631. + @ and can be written whether or not the L2 cache is enabled
  3632. + ldr r0, [r1, #L2X0_CACHE_ID]
  3633. + and r0, r0, #L2X0_CACHE_ID_RTL_MASK
  3634. + cmp r0, #L310_CACHE_ID_RTL_R2P0
  3635. + strcs r7, [r1, #L310_PREFETCH_CTRL]
  3636. + cmp r0, #L310_CACHE_ID_RTL_R3P0
  3637. + strcs r8, [r1, #L310_POWER_CTRL]
  3638. +
  3639. + @ Don't setup the L2 cache if it is already enabled
  3640. + ldr r0, [r1, #L2X0_CTRL]
  3641. + tst r0, #L2X0_CTRL_EN
  3642. + movne pc, lr
  3643. +
  3644. + str r3, [r1, #L310_TAG_LATENCY_CTRL]
  3645. + str r4, [r1, #L310_DATA_LATENCY_CTRL]
  3646. + str r6, [r1, #L310_ADDR_FILTER_END]
  3647. + str r5, [r1, #L310_ADDR_FILTER_START]
  3648. +
  3649. + str r2, [r1, #L2X0_AUX_CTRL]
  3650. + mov r9, #L2X0_CTRL_EN
  3651. + str r9, [r1, #L2X0_CTRL]
  3652. + mov pc, lr
  3653. +ENDPROC(l2c310_early_resume)
  3654. +
  3655. + .align
  3656. +1: .long l2x0_saved_regs - .
  3657. diff -Nur linux-3.15-rc1.orig/arch/arm/mm/Makefile linux-3.15-rc1/arch/arm/mm/Makefile
  3658. --- linux-3.15-rc1.orig/arch/arm/mm/Makefile 2014-04-13 23:18:35.000000000 +0200
  3659. +++ linux-3.15-rc1/arch/arm/mm/Makefile 2014-04-25 14:11:13.531375131 +0200
  3660. @@ -95,7 +95,8 @@
  3661. AFLAGS_proc-v6.o :=-Wa,-march=armv6
  3662. AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
  3663. +obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
  3664. obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
  3665. -obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
  3666. +obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
  3667. obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
  3668. obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
  3669. diff -Nur linux-3.15-rc1.orig/arch/arm/plat-samsung/s5p-sleep.S linux-3.15-rc1/arch/arm/plat-samsung/s5p-sleep.S
  3670. --- linux-3.15-rc1.orig/arch/arm/plat-samsung/s5p-sleep.S 2014-04-13 23:18:35.000000000 +0200
  3671. +++ linux-3.15-rc1/arch/arm/plat-samsung/s5p-sleep.S 2014-04-25 14:11:13.531375131 +0200
  3672. @@ -22,7 +22,6 @@
  3673. */
  3674. #include <linux/linkage.h>
  3675. -#include <asm/asm-offsets.h>
  3676. .data
  3677. .align
  3678. diff -Nur linux-3.15-rc1.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt linux-3.15-rc1/Documentation/devicetree/bindings/leds/leds-pwm.txt
  3679. --- linux-3.15-rc1.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt 2014-04-13 23:18:35.000000000 +0200
  3680. +++ linux-3.15-rc1/Documentation/devicetree/bindings/leds/leds-pwm.txt 2014-04-25 14:11:13.515375059 +0200
  3681. @@ -13,6 +13,8 @@
  3682. For the pwms and pwm-names property please refer to:
  3683. Documentation/devicetree/bindings/pwm/pwm.txt
  3684. - max-brightness : Maximum brightness possible for the LED
  3685. +- active-low : (optional) For PWMs where the LED is wired to supply
  3686. + rather than ground.
  3687. - label : (optional)
  3688. see Documentation/devicetree/bindings/leds/common.txt
  3689. - linux,default-trigger : (optional)
  3690. diff -Nur linux-3.15-rc1.orig/Documentation/devicetree/bindings/mmc/mmc.txt linux-3.15-rc1/Documentation/devicetree/bindings/mmc/mmc.txt
  3691. --- linux-3.15-rc1.orig/Documentation/devicetree/bindings/mmc/mmc.txt 2014-04-13 23:18:35.000000000 +0200
  3692. +++ linux-3.15-rc1/Documentation/devicetree/bindings/mmc/mmc.txt 2014-04-25 14:11:13.515375059 +0200
  3693. @@ -5,6 +5,8 @@
  3694. Interpreted by the OF core:
  3695. - reg: Registers location and length.
  3696. - interrupts: Interrupts used by the MMC controller.
  3697. +- clocks: Clocks needed for the host controller, if any.
  3698. +- clock-names: Goes with clocks above.
  3699. Card detection:
  3700. If no property below is supplied, host native card detect is used.
  3701. @@ -39,6 +41,15 @@
  3702. - mmc-hs200-1_8v: eMMC HS200 mode(1.8V I/O) is supported
  3703. - mmc-hs200-1_2v: eMMC HS200 mode(1.2V I/O) is supported
  3704. +Card power and reset control:
  3705. +The following properties can be specified for cases where the MMC
  3706. +peripheral needs additional reset, regulator and clock lines. It is for
  3707. +example common for WiFi/BT adapters to have these separate from the main
  3708. +MMC bus:
  3709. + - card-reset-gpios: Specify GPIOs for card reset (reset active low)
  3710. + - card-external-vcc-supply: Regulator to drive (independent) card VCC
  3711. + - clock with name "card_ext_clock": External clock provided to the card
  3712. +
  3713. *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
  3714. polarity properties, we have to fix the meaning of the "normal" and "inverted"
  3715. line levels. We choose to follow the SDHCI standard, which specifies both those
  3716. diff -Nur linux-3.15-rc1.orig/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt linux-3.15-rc1/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt
  3717. --- linux-3.15-rc1.orig/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt 2014-04-13 23:18:35.000000000 +0200
  3718. +++ linux-3.15-rc1/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt 2014-04-25 14:11:13.515375059 +0200
  3719. @@ -60,7 +60,8 @@
  3720. - compatible: Should be "fsl,imx-parallel-display"
  3721. Optional properties:
  3722. - interface_pix_fmt: How this display is connected to the
  3723. - display interface. Currently supported types: "rgb24", "rgb565", "bgr666"
  3724. + display interface. Currently supported types: "rgb24", "rgb565", "bgr666",
  3725. + "rgb666"
  3726. - edid: verbatim EDID data block describing attached display.
  3727. - ddc: phandle describing the i2c bus handling the display data
  3728. channel
  3729. diff -Nur linux-3.15-rc1.orig/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml linux-3.15-rc1/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
  3730. --- linux-3.15-rc1.orig/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml 2014-04-13 23:18:35.000000000 +0200
  3731. +++ linux-3.15-rc1/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml 2014-04-25 14:11:13.511375042 +0200
  3732. @@ -279,6 +279,45 @@
  3733. <entry></entry>
  3734. <entry></entry>
  3735. </row>
  3736. + <row id="V4L2-PIX-FMT-RGB666">
  3737. + <entry><constant>V4L2_PIX_FMT_RGB666</constant></entry>
  3738. + <entry>'RGBH'</entry>
  3739. + <entry></entry>
  3740. + <entry>r<subscript>5</subscript></entry>
  3741. + <entry>r<subscript>4</subscript></entry>
  3742. + <entry>r<subscript>3</subscript></entry>
  3743. + <entry>r<subscript>2</subscript></entry>
  3744. + <entry>r<subscript>1</subscript></entry>
  3745. + <entry>r<subscript>0</subscript></entry>
  3746. + <entry>g<subscript>5</subscript></entry>
  3747. + <entry>g<subscript>4</subscript></entry>
  3748. + <entry></entry>
  3749. + <entry>g<subscript>3</subscript></entry>
  3750. + <entry>g<subscript>2</subscript></entry>
  3751. + <entry>g<subscript>1</subscript></entry>
  3752. + <entry>g<subscript>0</subscript></entry>
  3753. + <entry>b<subscript>5</subscript></entry>
  3754. + <entry>b<subscript>4</subscript></entry>
  3755. + <entry>b<subscript>3</subscript></entry>
  3756. + <entry>b<subscript>2</subscript></entry>
  3757. + <entry></entry>
  3758. + <entry>b<subscript>1</subscript></entry>
  3759. + <entry>b<subscript>0</subscript></entry>
  3760. + <entry></entry>
  3761. + <entry></entry>
  3762. + <entry></entry>
  3763. + <entry></entry>
  3764. + <entry></entry>
  3765. + <entry></entry>
  3766. + <entry></entry>
  3767. + <entry></entry>
  3768. + <entry></entry>
  3769. + <entry></entry>
  3770. + <entry></entry>
  3771. + <entry></entry>
  3772. + <entry></entry>
  3773. + <entry></entry>
  3774. + </row>
  3775. <row id="V4L2-PIX-FMT-BGR24">
  3776. <entry><constant>V4L2_PIX_FMT_BGR24</constant></entry>
  3777. <entry>'BGR3'</entry>
  3778. diff -Nur linux-3.15-rc1.orig/drivers/ata/ahci_imx.c linux-3.15-rc1/drivers/ata/ahci_imx.c
  3779. --- linux-3.15-rc1.orig/drivers/ata/ahci_imx.c 2014-04-13 23:18:35.000000000 +0200
  3780. +++ linux-3.15-rc1/drivers/ata/ahci_imx.c 2014-04-25 14:11:13.531375131 +0200
  3781. @@ -46,6 +46,7 @@
  3782. struct regmap *gpr;
  3783. bool no_device;
  3784. bool first_time;
  3785. + u32 phy_params;
  3786. };
  3787. static int ahci_imx_hotplug;
  3788. @@ -90,14 +91,7 @@
  3789. IMX6Q_GPR13_SATA_TX_LVL_MASK |
  3790. IMX6Q_GPR13_SATA_MPLL_CLK_EN |
  3791. IMX6Q_GPR13_SATA_TX_EDGE_RATE,
  3792. - IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
  3793. - IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
  3794. - IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
  3795. - IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
  3796. - IMX6Q_GPR13_SATA_MPLL_SS_EN |
  3797. - IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
  3798. - IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
  3799. - IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
  3800. + imxpriv->phy_params);
  3801. regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
  3802. IMX6Q_GPR13_SATA_MPLL_CLK_EN,
  3803. IMX6Q_GPR13_SATA_MPLL_CLK_EN);
  3804. @@ -160,6 +154,10 @@
  3805. writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
  3806. imx_sata_disable(hpriv);
  3807. imxpriv->no_device = true;
  3808. +
  3809. + dev_info(ap->dev, "no device found, disabling link.\n");
  3810. + dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX
  3811. + ".hotplug=1 to enable hotplug\n");
  3812. }
  3813. static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
  3814. @@ -200,6 +198,165 @@
  3815. };
  3816. MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
  3817. +struct reg_value {
  3818. + u32 of_value;
  3819. + u32 reg_value;
  3820. +};
  3821. +
  3822. +struct reg_property {
  3823. + const char *name;
  3824. + const struct reg_value *values;
  3825. + size_t num_values;
  3826. + u32 def_value;
  3827. + u32 set_value;
  3828. +};
  3829. +
  3830. +static const struct reg_value gpr13_tx_level[] = {
  3831. + { 937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
  3832. + { 947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
  3833. + { 957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
  3834. + { 966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
  3835. + { 976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
  3836. + { 986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
  3837. + { 996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
  3838. + { 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
  3839. + { 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
  3840. + { 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
  3841. + { 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
  3842. + { 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
  3843. + { 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
  3844. + { 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
  3845. + { 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
  3846. + { 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
  3847. + { 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
  3848. + { 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
  3849. + { 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
  3850. + { 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
  3851. + { 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
  3852. + { 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
  3853. + { 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
  3854. + { 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
  3855. + { 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
  3856. + { 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
  3857. + { 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
  3858. + { 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
  3859. + { 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
  3860. + { 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
  3861. + { 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
  3862. + { 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
  3863. +};
  3864. +
  3865. +static const struct reg_value gpr13_tx_boost[] = {
  3866. + { 0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
  3867. + { 370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
  3868. + { 740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
3869. + { 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
3870. + { 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
3871. + { 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
3872. + { 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
3873. + { 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
3874. + { 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
3875. + { 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
3876. + { 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
3877. + { 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
3878. + { 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
3879. + { 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
3880. + { 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
3881. + { 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
  3882. +};
  3883. +
  3884. +static const struct reg_value gpr13_tx_atten[] = {
  3885. + { 8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
  3886. + { 9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
  3887. + { 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
  3888. + { 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
  3889. + { 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
  3890. + { 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
  3891. +};
  3892. +
  3893. +static const struct reg_value gpr13_rx_eq[] = {
  3894. + { 500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
  3895. + { 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
  3896. + { 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
  3897. + { 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
  3898. + { 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
  3899. + { 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
  3900. + { 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
  3901. + { 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
  3902. +};
  3903. +
  3904. +static const struct reg_property gpr13_props[] = {
  3905. + {
  3906. + .name = "fsl,transmit-level-mV",
  3907. + .values = gpr13_tx_level,
  3908. + .num_values = ARRAY_SIZE(gpr13_tx_level),
  3909. + .def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
  3910. + }, {
  3911. + .name = "fsl,transmit-boost-mdB",
  3912. + .values = gpr13_tx_boost,
  3913. + .num_values = ARRAY_SIZE(gpr13_tx_boost),
  3914. + .def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
  3915. + }, {
  3916. + .name = "fsl,transmit-atten-16ths",
  3917. + .values = gpr13_tx_atten,
  3918. + .num_values = ARRAY_SIZE(gpr13_tx_atten),
  3919. + .def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
  3920. + }, {
  3921. + .name = "fsl,receive-eq-mdB",
  3922. + .values = gpr13_rx_eq,
  3923. + .num_values = ARRAY_SIZE(gpr13_rx_eq),
  3924. + .def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
  3925. + }, {
  3926. + .name = "fsl,no-spread-spectrum",
  3927. + .def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
  3928. + .set_value = 0,
  3929. + },
  3930. +};
  3931. +
  3932. +static u32 imx_ahci_parse_props(struct device *dev,
  3933. + const struct reg_property *prop, size_t num)
  3934. +{
  3935. + struct device_node *np = dev->of_node;
  3936. + u32 reg_value = 0;
  3937. + int i, j;
  3938. +
  3939. + for (i = 0; i < num; i++, prop++) {
  3940. + u32 of_val;
  3941. +
  3942. + if (prop->num_values == 0) {
  3943. + if (of_property_read_bool(np, prop->name))
  3944. + reg_value |= prop->set_value;
  3945. + else
  3946. + reg_value |= prop->def_value;
  3947. + continue;
  3948. + }
  3949. +
  3950. + if (of_property_read_u32(np, prop->name, &of_val)) {
  3951. + dev_info(dev, "%s not specified, using %08x\n",
  3952. + prop->name, prop->def_value);
  3953. + reg_value |= prop->def_value;
  3954. + continue;
  3955. + }
  3956. +
  3957. + for (j = 0; j < prop->num_values; j++) {
  3958. + if (prop->values[j].of_value == of_val) {
  3959. + dev_info(dev, "%s value %u, using %08x\n",
  3960. + prop->name, of_val, prop->values[j].reg_value);
  3961. + reg_value |= prop->values[j].reg_value;
  3962. + break;
  3963. + }
  3964. + }
  3965. +
  3966. + if (j == prop->num_values) {
  3967. + dev_err(dev, "DT property %s is not a valid value\n",
  3968. + prop->name);
  3969. + reg_value |= prop->def_value;
  3970. + }
  3971. + }
  3972. +
  3973. + return reg_value;
  3974. +}
  3975. +
  3976. static int imx_ahci_probe(struct platform_device *pdev)
  3977. {
  3978. struct device *dev = &pdev->dev;
  3979. @@ -227,6 +384,8 @@
  3980. }
  3981. if (imxpriv->type == AHCI_IMX6Q) {
  3982. + u32 reg_value;
  3983. +
  3984. imxpriv->gpr = syscon_regmap_lookup_by_compatible(
  3985. "fsl,imx6q-iomuxc-gpr");
  3986. if (IS_ERR(imxpriv->gpr)) {
  3987. @@ -234,6 +393,15 @@
  3988. "failed to find fsl,imx6q-iomux-gpr regmap\n");
  3989. return PTR_ERR(imxpriv->gpr);
  3990. }
  3991. +
  3992. + reg_value = imx_ahci_parse_props(dev, gpr13_props,
  3993. + ARRAY_SIZE(gpr13_props));
  3994. +
  3995. + imxpriv->phy_params =
  3996. + IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
  3997. + IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
  3998. + IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
  3999. + reg_value;
  4000. }
  4001. hpriv = ahci_platform_get_resources(pdev);
  4002. diff -Nur linux-3.15-rc1.orig/drivers/cec/cec-dev.c linux-3.15-rc1/drivers/cec/cec-dev.c
  4003. --- linux-3.15-rc1.orig/drivers/cec/cec-dev.c 1970-01-01 01:00:00.000000000 +0100
  4004. +++ linux-3.15-rc1/drivers/cec/cec-dev.c 2014-04-25 14:11:13.535375148 +0200
  4005. @@ -0,0 +1,384 @@
  4006. +/*
  4007. + * HDMI Consumer Electronics Control
  4008. + *
4009. + * This provides the user API for communication with HDMI CEC compliant
  4010. + * devices in kernel drivers, and is based upon the protocol developed
  4011. + * by Freescale for their i.MX SoCs.
  4012. + *
  4013. + * This program is free software; you can redistribute it and/or modify
  4014. + * it under the terms of the GNU General Public License version 2 as
  4015. + * published by the Free Software Foundation.
  4016. + */
  4017. +#include <linux/cec-dev.h>
  4018. +#include <linux/device.h>
  4019. +#include <linux/fs.h>
  4020. +#include <linux/module.h>
  4021. +#include <linux/poll.h>
  4022. +#include <linux/sched.h>
  4023. +#include <linux/slab.h>
  4024. +
  4025. +struct cec_event {
  4026. + struct cec_user_event usr;
  4027. + struct list_head node;
  4028. +};
  4029. +
  4030. +static struct class *cec_class;
  4031. +static int cec_major;
  4032. +
  4033. +static void cec_dev_send_message(struct cec_dev *cec_dev, u8 *msg,
  4034. + size_t count)
  4035. +{
  4036. + unsigned long flags;
  4037. +
  4038. + spin_lock_irqsave(&cec_dev->lock, flags);
  4039. + cec_dev->retries = 5;
  4040. + cec_dev->write_busy = 1;
  4041. + cec_dev->send_message(cec_dev, msg, count);
  4042. + spin_unlock_irqrestore(&cec_dev->lock, flags);
  4043. +}
  4044. +
  4045. +void cec_dev_event(struct cec_dev *cec_dev, int type, u8 *msg, size_t len)
  4046. +{
  4047. + struct cec_event *event;
  4048. + unsigned long flags;
  4049. +
  4050. + event = kzalloc(sizeof(*event), GFP_ATOMIC);
  4051. + if (event) {
  4052. + event->usr.event_type = type;
  4053. + event->usr.msg_len = len;
  4054. + if (msg)
  4055. + memcpy(event->usr.msg, msg, len);
  4056. +
  4057. + spin_lock_irqsave(&cec_dev->lock, flags);
  4058. + list_add_tail(&event->node, &cec_dev->events);
  4059. + spin_unlock_irqrestore(&cec_dev->lock, flags);
  4060. + wake_up(&cec_dev->waitq);
  4061. + }
  4062. +}
  4063. +EXPORT_SYMBOL_GPL(cec_dev_event);
  4064. +
  4065. +static int cec_dev_lock_write(struct cec_dev *cec_dev, struct file *file)
  4066. + __acquires(cec_dev->mutex)
  4067. +{
  4068. + int ret;
  4069. +
  4070. + do {
  4071. + if (file->f_flags & O_NONBLOCK) {
  4072. + if (cec_dev->write_busy)
  4073. + return -EAGAIN;
  4074. + } else {
  4075. + ret = wait_event_interruptible(cec_dev->waitq,
  4076. + !cec_dev->write_busy);
  4077. + if (ret)
  4078. + break;
  4079. + }
  4080. +
  4081. + ret = mutex_lock_interruptible(&cec_dev->mutex);
  4082. + if (ret)
  4083. + break;
  4084. +
  4085. + if (!cec_dev->write_busy)
  4086. + break;
  4087. +
  4088. + mutex_unlock(&cec_dev->mutex);
  4089. + } while (1);
  4090. +
  4091. + return ret;
  4092. +}
  4093. +
  4094. +static ssize_t cec_dev_read(struct file *file, char __user *buf,
  4095. + size_t count, loff_t *ppos)
  4096. +{
  4097. + struct cec_dev *cec_dev = file->private_data;
  4098. + ssize_t ret;
  4099. +
  4100. + if (count > sizeof(struct cec_user_event))
  4101. + count = sizeof(struct cec_user_event);
  4102. +
  4103. + if (!access_ok(VERIFY_WRITE, buf, count))
  4104. + return -EFAULT;
  4105. +
  4106. + do {
  4107. + struct cec_event *event = NULL;
  4108. + unsigned long flags;
  4109. +
  4110. + spin_lock_irqsave(&cec_dev->lock, flags);
  4111. + if (!list_empty(&cec_dev->events)) {
  4112. + event = list_first_entry(&cec_dev->events,
  4113. + struct cec_event, node);
  4114. + list_del(&event->node);
  4115. + }
  4116. + spin_unlock_irqrestore(&cec_dev->lock, flags);
  4117. +
  4118. + if (event) {
  4119. + ret = __copy_to_user(buf, &event->usr, count) ?
  4120. + -EFAULT : count;
  4121. + kfree(event);
  4122. + break;
  4123. + }
  4124. +
  4125. + if (file->f_flags & O_NONBLOCK) {
  4126. + ret = -EAGAIN;
  4127. + break;
  4128. + }
  4129. +
  4130. + ret = wait_event_interruptible(cec_dev->waitq,
  4131. + !list_empty(&cec_dev->events));
  4132. + if (ret)
  4133. + break;
  4134. + } while (1);
  4135. +
  4136. + return ret;
  4137. +}
  4138. +
  4139. +static ssize_t cec_dev_write(struct file *file, const char __user *buf,
  4140. + size_t count, loff_t *ppos)
  4141. +{
  4142. + struct cec_dev *cec_dev = file->private_data;
  4143. + u8 msg[MAX_MESSAGE_LEN];
  4144. + int ret;
  4145. +
  4146. + if (count > sizeof(msg))
  4147. + return -E2BIG;
  4148. +
  4149. + if (copy_from_user(msg, buf, count))
  4150. + return -EFAULT;
  4151. +
  4152. + ret = cec_dev_lock_write(cec_dev, file);
  4153. + if (ret)
  4154. + return ret;
  4155. +
  4156. + cec_dev_send_message(cec_dev, msg, count);
  4157. +
  4158. + mutex_unlock(&cec_dev->mutex);
  4159. +
  4160. + return count;
  4161. +}
  4162. +
  4163. +static long cec_dev_ioctl(struct file *file, u_int cmd, unsigned long arg)
  4164. +{
  4165. + struct cec_dev *cec_dev = file->private_data;
  4166. + int ret;
  4167. +
  4168. + switch (cmd) {
  4169. + case HDMICEC_IOC_O_SETLOGICALADDRESS:
  4170. + case HDMICEC_IOC_SETLOGICALADDRESS:
  4171. + if (arg > 15) {
  4172. + ret = -EINVAL;
  4173. + break;
  4174. + }
  4175. +
  4176. + ret = cec_dev_lock_write(cec_dev, file);
  4177. + if (ret == 0) {
  4178. + unsigned char msg[1];
  4179. +
  4180. + cec_dev->addresses = BIT(arg);
  4181. + cec_dev->set_address(cec_dev, cec_dev->addresses);
  4182. +
  4183. + /*
  4184. + * Send a ping message with the source and destination
  4185. + * set to our address; the result indicates whether
4186. + * another unit has chosen our address simultaneously.
  4187. + */
  4188. + msg[0] = arg << 4 | arg;
  4189. + cec_dev_send_message(cec_dev, msg, sizeof(msg));
  4190. + mutex_unlock(&cec_dev->mutex);
  4191. + }
  4192. + break;
  4193. +
  4194. + case HDMICEC_IOC_STARTDEVICE:
  4195. + ret = mutex_lock_interruptible(&cec_dev->mutex);
  4196. + if (ret == 0) {
  4197. + cec_dev->addresses = BIT(15);
  4198. + cec_dev->set_address(cec_dev, cec_dev->addresses);
  4199. + mutex_unlock(&cec_dev->mutex);
  4200. + }
  4201. + break;
  4202. +
  4203. + case HDMICEC_IOC_STOPDEVICE:
  4204. + ret = 0;
  4205. + break;
  4206. +
  4207. + case HDMICEC_IOC_GETPHYADDRESS:
  4208. + ret = put_user(cec_dev->physical, (u16 __user *)arg);
  4209. + ret = -ENOIOCTLCMD;
  4210. + break;
  4211. +
  4212. + default:
  4213. + ret = -ENOIOCTLCMD;
  4214. + break;
  4215. + }
  4216. +
  4217. + return ret;
  4218. +}
  4219. +
  4220. +static unsigned cec_dev_poll(struct file *file, poll_table *wait)
  4221. +{
  4222. + struct cec_dev *cec_dev = file->private_data;
  4223. + unsigned mask = 0;
  4224. +
  4225. + poll_wait(file, &cec_dev->waitq, wait);
  4226. +
  4227. + if (cec_dev->write_busy == 0)
  4228. + mask |= POLLOUT | POLLWRNORM;
  4229. + if (!list_empty(&cec_dev->events))
  4230. + mask |= POLLIN | POLLRDNORM;
  4231. +
  4232. + return mask;
  4233. +}
  4234. +
  4235. +static int cec_dev_release(struct inode *inode, struct file *file)
  4236. +{
  4237. + struct cec_dev *cec_dev = file->private_data;
  4238. +
  4239. + mutex_lock(&cec_dev->mutex);
  4240. + if (cec_dev->users >= 1)
  4241. + cec_dev->users -= 1;
  4242. + if (cec_dev->users == 0) {
  4243. + /*
  4244. + * Wait for any write to complete before shutting down.
  4245. + * A message should complete in a maximum of 2.75ms *
  4246. + * 160 bits + 4.7ms, or 444.7ms. Let's call that 500ms.
  4247. + * If we time out, shutdown anyway.
  4248. + */
  4249. + wait_event_timeout(cec_dev->waitq, !cec_dev->write_busy,
  4250. + msecs_to_jiffies(500));
  4251. +
  4252. + cec_dev->release(cec_dev);
  4253. +
  4254. + while (!list_empty(&cec_dev->events)) {
  4255. + struct cec_event *event;
  4256. +
  4257. + event = list_first_entry(&cec_dev->events,
  4258. + struct cec_event, node);
  4259. + list_del(&event->node);
  4260. + kfree(event);
  4261. + }
  4262. + }
  4263. + mutex_unlock(&cec_dev->mutex);
  4264. + return 0;
  4265. +}
  4266. +
  4267. +static int cec_dev_open(struct inode *inode, struct file *file)
  4268. +{
  4269. + struct cec_dev *cec_dev = container_of(inode->i_cdev, struct cec_dev,
  4270. + cdev);
  4271. + int ret = 0;
  4272. +
  4273. + nonseekable_open(inode, file);
  4274. +
  4275. + file->private_data = cec_dev;
  4276. +
  4277. + ret = mutex_lock_interruptible(&cec_dev->mutex);
  4278. + if (ret)
  4279. + return ret;
  4280. +
  4281. + if (cec_dev->users++ == 0) {
  4282. + cec_dev->addresses = BIT(15);
  4283. +
  4284. + ret = cec_dev->open(cec_dev);
  4285. + if (ret < 0)
  4286. + cec_dev->users = 0;
  4287. + }
  4288. + mutex_unlock(&cec_dev->mutex);
  4289. +
  4290. + return ret;
  4291. +}
  4292. +
  4293. +static const struct file_operations hdmi_cec_fops = {
  4294. + .owner = THIS_MODULE,
  4295. + .read = cec_dev_read,
  4296. + .write = cec_dev_write,
  4297. + .open = cec_dev_open,
  4298. + .unlocked_ioctl = cec_dev_ioctl,
  4299. + .release = cec_dev_release,
  4300. + .poll = cec_dev_poll,
  4301. +};
  4302. +
  4303. +void cec_dev_init(struct cec_dev *cec_dev, struct module *module)
  4304. +{
  4305. + cec_dev->devn = MKDEV(cec_major, 0);
  4306. +
  4307. + INIT_LIST_HEAD(&cec_dev->events);
  4308. + init_waitqueue_head(&cec_dev->waitq);
  4309. + spin_lock_init(&cec_dev->lock);
  4310. + mutex_init(&cec_dev->mutex);
  4311. +
  4312. + cec_dev->addresses = BIT(15);
  4313. +
  4314. + cdev_init(&cec_dev->cdev, &hdmi_cec_fops);
  4315. + cec_dev->cdev.owner = module;
  4316. +}
  4317. +EXPORT_SYMBOL_GPL(cec_dev_init);
  4318. +
  4319. +int cec_dev_add(struct cec_dev *cec_dev, struct device *dev, const char *name)
  4320. +{
  4321. + struct device *cd;
  4322. + int ret;
  4323. +
  4324. + ret = cdev_add(&cec_dev->cdev, cec_dev->devn, 1);
  4325. + if (ret < 0)
  4326. + goto err_cdev;
  4327. +
  4328. + cd = device_create(cec_class, dev, cec_dev->devn, NULL, name);
  4329. + if (IS_ERR(cd)) {
  4330. + ret = PTR_ERR(cd);
  4331. + dev_err(dev, "can't create device: %d\n", ret);
  4332. + goto err_dev;
  4333. + }
  4334. +
  4335. + return 0;
  4336. +
  4337. + err_dev:
  4338. + cdev_del(&cec_dev->cdev);
  4339. + err_cdev:
  4340. + return ret;
  4341. +}
  4342. +EXPORT_SYMBOL_GPL(cec_dev_add);
  4343. +
  4344. +void cec_dev_remove(struct cec_dev *cec_dev)
  4345. +{
  4346. + device_destroy(cec_class, cec_dev->devn);
  4347. + cdev_del(&cec_dev->cdev);
  4348. +}
  4349. +EXPORT_SYMBOL_GPL(cec_dev_remove);
  4350. +
  4351. +static int cec_init(void)
  4352. +{
  4353. + dev_t dev;
  4354. + int ret;
  4355. +
  4356. + cec_class = class_create(THIS_MODULE, "hdmi-cec");
  4357. + if (IS_ERR(cec_class)) {
  4358. + ret = PTR_ERR(cec_class);
  4359. + pr_err("cec: can't create cec class: %d\n", ret);
  4360. + goto err_class;
  4361. + }
  4362. +
  4363. + ret = alloc_chrdev_region(&dev, 0, 1, "hdmi-cec");
  4364. + if (ret) {
  4365. + pr_err("cec: can't create character devices: %d\n", ret);
  4366. + goto err_chrdev;
  4367. + }
  4368. +
  4369. + cec_major = MAJOR(dev);
  4370. +
  4371. + return 0;
  4372. +
  4373. + err_chrdev:
  4374. + class_destroy(cec_class);
  4375. + err_class:
  4376. + return ret;
  4377. +}
  4378. +subsys_initcall(cec_init);
  4379. +
  4380. +static void cec_exit(void)
  4381. +{
  4382. + unregister_chrdev_region(MKDEV(cec_major, 0), 1);
  4383. + class_destroy(cec_class);
  4384. +}
  4385. +module_exit(cec_exit);
  4386. +
  4387. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  4388. +MODULE_DESCRIPTION("Generic HDMI CEC driver");
  4389. +MODULE_LICENSE("GPL");
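A minimal user-space sketch of the character-device interface implemented above. The HDMICEC_IOC_* names come from the driver; the device node path and the uapi header location are assumptions, and error handling is trimmed:

/* Sketch only: node path and header location are assumed, not defined by this patch. */
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "hdmi-cec.h"		/* assumed home of the HDMICEC_IOC_* definitions */

int main(void)
{
	struct pollfd pfd;
	uint16_t phys;
	unsigned int pa;
	int fd = open("/dev/hdmi-cec", O_RDWR);	/* hypothetical node name */

	if (fd < 0)
		return 1;

	/* Claim logical address 4 (CEC "Playback Device 1"); values 0..15 are accepted. */
	if (ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4) < 0)
		perror("HDMICEC_IOC_SETLOGICALADDRESS");

	/* Fetch the physical address maintained by the glue driver. */
	if (ioctl(fd, HDMICEC_IOC_GETPHYADDRESS, &phys) == 0) {
		pa = phys;
		printf("physical address %u.%u.%u.%u\n",
		       pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);
	}

	/* POLLIN: queued events/messages to read; POLLOUT: transmitter idle. */
	pfd.fd = fd;
	pfd.events = POLLIN | POLLOUT;
	poll(&pfd, 1, -1);

	close(fd);
	return 0;
}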
  4390. diff -Nur linux-3.15-rc1.orig/drivers/cec/Kconfig linux-3.15-rc1/drivers/cec/Kconfig
  4391. --- linux-3.15-rc1.orig/drivers/cec/Kconfig 1970-01-01 01:00:00.000000000 +0100
  4392. +++ linux-3.15-rc1/drivers/cec/Kconfig 2014-04-25 14:11:13.531375131 +0200
  4393. @@ -0,0 +1,14 @@
  4394. +#
4395. +# Consumer Electronics Control support
  4396. +#
  4397. +
  4398. +menu "Consumer Electronics Control devices"
  4399. +
  4400. +config CEC
  4401. + bool
  4402. +
  4403. +config HDMI_CEC_CORE
  4404. + tristate
  4405. + select CEC
  4406. +
  4407. +endmenu
  4408. diff -Nur linux-3.15-rc1.orig/drivers/cec/Makefile linux-3.15-rc1/drivers/cec/Makefile
  4409. --- linux-3.15-rc1.orig/drivers/cec/Makefile 1970-01-01 01:00:00.000000000 +0100
  4410. +++ linux-3.15-rc1/drivers/cec/Makefile 2014-04-25 14:11:13.531375131 +0200
  4411. @@ -0,0 +1 @@
  4412. +obj-$(CONFIG_HDMI_CEC_CORE) += cec-dev.o
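On the kernel side, a glue driver embeds a struct cec_dev, fills in the callbacks invoked by cec-dev.c above (open, release, set_address) and registers the node with cec_dev_init()/cec_dev_add(). A hedged sketch follows; the callback signatures are inferred from their call sites, the receive/transmit hooks are omitted, and everything around them is an assumption:

/* Hypothetical glue driver -- a sketch, not part of the patch. */
struct my_hdmi {
	struct cec_dev cec;		/* core state embedded in driver data */
	/* ... transmitter state ... */
};

static int my_cec_open(struct cec_dev *cec)
{
	/* enable the CEC block, unmask its interrupt */
	return 0;
}

static void my_cec_release(struct cec_dev *cec)
{
	/* quiesce the CEC block */
}

static void my_cec_set_address(struct cec_dev *cec, unsigned int addresses)
{
	/* program the hardware logical-address filter from the bitmask */
}

static int my_probe(struct platform_device *pdev)
{
	struct my_hdmi *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	cec_dev_init(&priv->cec, THIS_MODULE);
	priv->cec.open = my_cec_open;
	priv->cec.release = my_cec_release;
	priv->cec.set_address = my_cec_set_address;
	priv->cec.physical = 0x1000;	/* 1.0.0.0 -- would normally come from the sink's EDID */

	platform_set_drvdata(pdev, priv);
	return cec_dev_add(&priv->cec, &pdev->dev, "hdmi-cec");	/* node name is the driver's choice */
}

static int my_remove(struct platform_device *pdev)
{
	struct my_hdmi *priv = platform_get_drvdata(pdev);

	cec_dev_remove(&priv->cec);
	return 0;
}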
  4413. diff -Nur linux-3.15-rc1.orig/drivers/gpu/drm/drm_crtc_helper.c linux-3.15-rc1/drivers/gpu/drm/drm_crtc_helper.c
  4414. --- linux-3.15-rc1.orig/drivers/gpu/drm/drm_crtc_helper.c 2014-04-13 23:18:35.000000000 +0200
  4415. +++ linux-3.15-rc1/drivers/gpu/drm/drm_crtc_helper.c 2014-04-25 14:11:13.535375148 +0200
  4416. @@ -281,16 +281,10 @@
  4417. static void __drm_helper_disable_unused_functions(struct drm_device *dev)
  4418. {
  4419. struct drm_encoder *encoder;
  4420. - struct drm_connector *connector;
  4421. struct drm_crtc *crtc;
  4422. drm_warn_on_modeset_not_all_locked(dev);
  4423. - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
  4424. - if (!connector->encoder)
  4425. - continue;
  4426. - }
  4427. -
  4428. list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
  4429. if (!drm_helper_encoder_in_use(encoder)) {
  4430. drm_encoder_disable(encoder);
  4431. diff -Nur linux-3.15-rc1.orig/drivers/Kconfig linux-3.15-rc1/drivers/Kconfig
  4432. --- linux-3.15-rc1.orig/drivers/Kconfig 2014-04-13 23:18:35.000000000 +0200
  4433. +++ linux-3.15-rc1/drivers/Kconfig 2014-04-25 14:11:13.531375131 +0200
  4434. @@ -174,4 +174,6 @@
  4435. source "drivers/mcb/Kconfig"
  4436. +source "drivers/cec/Kconfig"
  4437. +
  4438. endmenu
  4439. diff -Nur linux-3.15-rc1.orig/drivers/leds/leds-pwm.c linux-3.15-rc1/drivers/leds/leds-pwm.c
  4440. --- linux-3.15-rc1.orig/drivers/leds/leds-pwm.c 2014-04-13 23:18:35.000000000 +0200
  4441. +++ linux-3.15-rc1/drivers/leds/leds-pwm.c 2014-04-25 14:11:13.535375148 +0200
  4442. @@ -69,6 +69,10 @@
  4443. duty *= brightness;
  4444. do_div(duty, max);
  4445. +
  4446. + if (led_dat->active_low)
  4447. + duty = led_dat->period - duty;
  4448. +
  4449. led_dat->duty = duty;
  4450. if (led_dat->can_sleep)
  4451. @@ -92,55 +96,75 @@
  4452. }
  4453. }
  4454. -static int led_pwm_create_of(struct platform_device *pdev,
  4455. - struct led_pwm_priv *priv)
  4456. +static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
  4457. + struct led_pwm *led, struct device_node *child)
  4458. {
  4459. - struct device_node *child;
  4460. + struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
  4461. int ret;
  4462. - for_each_child_of_node(pdev->dev.of_node, child) {
  4463. - struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
  4464. + led_data->active_low = led->active_low;
  4465. + led_data->period = led->pwm_period_ns;
  4466. + led_data->cdev.name = led->name;
  4467. + led_data->cdev.default_trigger = led->default_trigger;
  4468. + led_data->cdev.brightness_set = led_pwm_set;
  4469. + led_data->cdev.brightness = LED_OFF;
  4470. + led_data->cdev.max_brightness = led->max_brightness;
  4471. + led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
  4472. +
  4473. + if (child)
  4474. + led_data->pwm = devm_of_pwm_get(dev, child, NULL);
  4475. + else
  4476. + led_data->pwm = devm_pwm_get(dev, led->name);
  4477. + if (IS_ERR(led_data->pwm)) {
  4478. + ret = PTR_ERR(led_data->pwm);
  4479. + dev_err(dev, "unable to request PWM for %s: %d\n",
  4480. + led->name, ret);
  4481. + return ret;
  4482. + }
  4483. - led_dat->cdev.name = of_get_property(child, "label",
  4484. - NULL) ? : child->name;
  4485. + if (child)
  4486. + led_data->period = pwm_get_period(led_data->pwm);
  4487. - led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
  4488. - if (IS_ERR(led_dat->pwm)) {
  4489. - dev_err(&pdev->dev, "unable to request PWM for %s\n",
  4490. - led_dat->cdev.name);
  4491. - ret = PTR_ERR(led_dat->pwm);
  4492. - goto err;
  4493. - }
  4494. - /* Get the period from PWM core when n*/
  4495. - led_dat->period = pwm_get_period(led_dat->pwm);
  4496. + led_data->can_sleep = pwm_can_sleep(led_data->pwm);
  4497. + if (led_data->can_sleep)
  4498. + INIT_WORK(&led_data->work, led_pwm_work);
  4499. - led_dat->cdev.default_trigger = of_get_property(child,
  4500. + ret = led_classdev_register(dev, &led_data->cdev);
  4501. + if (ret == 0) {
  4502. + priv->num_leds++;
  4503. + } else {
  4504. + dev_err(dev, "failed to register PWM led for %s: %d\n",
  4505. + led->name, ret);
  4506. + }
  4507. +
  4508. + return ret;
  4509. +}
  4510. +
  4511. +static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv)
  4512. +{
  4513. + struct device_node *child;
  4514. + struct led_pwm led;
  4515. + int ret = 0;
  4516. +
  4517. + memset(&led, 0, sizeof(led));
  4518. +
  4519. + for_each_child_of_node(dev->of_node, child) {
  4520. + led.name = of_get_property(child, "label", NULL) ? :
  4521. + child->name;
  4522. +
  4523. + led.default_trigger = of_get_property(child,
  4524. "linux,default-trigger", NULL);
  4525. + led.active_low = of_property_read_bool(child, "active-low");
  4526. of_property_read_u32(child, "max-brightness",
  4527. - &led_dat->cdev.max_brightness);
  4528. + &led.max_brightness);
  4529. - led_dat->cdev.brightness_set = led_pwm_set;
  4530. - led_dat->cdev.brightness = LED_OFF;
  4531. - led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
  4532. -
  4533. - led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
  4534. - if (led_dat->can_sleep)
  4535. - INIT_WORK(&led_dat->work, led_pwm_work);
  4536. -
  4537. - ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
  4538. - if (ret < 0) {
  4539. - dev_err(&pdev->dev, "failed to register for %s\n",
  4540. - led_dat->cdev.name);
  4541. + ret = led_pwm_add(dev, priv, &led, child);
  4542. + if (ret) {
  4543. of_node_put(child);
  4544. - goto err;
  4545. + break;
  4546. }
  4547. - priv->num_leds++;
  4548. }
  4549. - return 0;
  4550. -err:
  4551. - led_pwm_cleanup(priv);
  4552. -
  4553. return ret;
  4554. }
  4555. @@ -166,51 +190,23 @@
  4556. if (pdata) {
  4557. for (i = 0; i < count; i++) {
  4558. - struct led_pwm *cur_led = &pdata->leds[i];
  4559. - struct led_pwm_data *led_dat = &priv->leds[i];
  4560. -
  4561. - led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
  4562. - if (IS_ERR(led_dat->pwm)) {
  4563. - ret = PTR_ERR(led_dat->pwm);
  4564. - dev_err(&pdev->dev,
  4565. - "unable to request PWM for %s\n",
  4566. - cur_led->name);
  4567. - goto err;
  4568. - }
  4569. -
  4570. - led_dat->cdev.name = cur_led->name;
  4571. - led_dat->cdev.default_trigger = cur_led->default_trigger;
  4572. - led_dat->active_low = cur_led->active_low;
  4573. - led_dat->period = cur_led->pwm_period_ns;
  4574. - led_dat->cdev.brightness_set = led_pwm_set;
  4575. - led_dat->cdev.brightness = LED_OFF;
  4576. - led_dat->cdev.max_brightness = cur_led->max_brightness;
  4577. - led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
  4578. -
  4579. - led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
  4580. - if (led_dat->can_sleep)
  4581. - INIT_WORK(&led_dat->work, led_pwm_work);
  4582. -
  4583. - ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
  4584. - if (ret < 0)
  4585. - goto err;
  4586. + ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i],
  4587. + NULL);
  4588. + if (ret)
  4589. + break;
  4590. }
  4591. - priv->num_leds = count;
  4592. } else {
  4593. - ret = led_pwm_create_of(pdev, priv);
  4594. - if (ret)
  4595. - return ret;
  4596. + ret = led_pwm_create_of(&pdev->dev, priv);
  4597. + }
  4598. +
  4599. + if (ret) {
  4600. + led_pwm_cleanup(priv);
  4601. + return ret;
  4602. }
  4603. platform_set_drvdata(pdev, priv);
  4604. return 0;
  4605. -
  4606. -err:
  4607. - priv->num_leds = i;
  4608. - led_pwm_cleanup(priv);
  4609. -
  4610. - return ret;
  4611. }
  4612. static int led_pwm_remove(struct platform_device *pdev)
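Both probe paths now funnel through led_pwm_add(), so the new active-low duty inversion applies to platform-data users as well as device-tree users. A hedged board-file sketch of the platform-data side; the struct led_pwm fields mirror the ones consumed above, while the platform-device wiring, the "leds_pwm" device name and the values are illustrative assumptions:

/* Illustrative board code -- a sketch, not part of the patch. */
static struct led_pwm board_pwm_leds[] = {
	{
		.name		 = "board:green:status",
		.default_trigger = "heartbeat",
		.pwm_period_ns	 = 1000000,	/* 1 kHz */
		.max_brightness	 = 255,
		.active_low	 = true,	/* LED is driven low-side */
	},
};

static struct led_pwm_platform_data board_pwm_led_data = {
	.num_leds = ARRAY_SIZE(board_pwm_leds),
	.leds	  = board_pwm_leds,
};

static struct platform_device board_leds_pwm = {
	.name = "leds_pwm",
	.id   = -1,
	.dev  = {
		.platform_data = &board_pwm_led_data,
	},
};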
  4613. diff -Nur linux-3.15-rc1.orig/drivers/Makefile linux-3.15-rc1/drivers/Makefile
  4614. --- linux-3.15-rc1.orig/drivers/Makefile 2014-04-13 23:18:35.000000000 +0200
  4615. +++ linux-3.15-rc1/drivers/Makefile 2014-04-25 14:11:13.531375131 +0200
  4616. @@ -157,3 +157,4 @@
  4617. obj-$(CONFIG_FMC) += fmc/
  4618. obj-$(CONFIG_POWERCAP) += powercap/
  4619. obj-$(CONFIG_MCB) += mcb/
  4620. +obj-$(CONFIG_CEC) += cec/
  4621. diff -Nur linux-3.15-rc1.orig/drivers/mmc/core/core.c linux-3.15-rc1/drivers/mmc/core/core.c
  4622. --- linux-3.15-rc1.orig/drivers/mmc/core/core.c 2014-04-13 23:18:35.000000000 +0200
  4623. +++ linux-3.15-rc1/drivers/mmc/core/core.c 2014-04-25 14:11:13.535375148 +0200
  4624. @@ -13,11 +13,13 @@
  4625. #include <linux/module.h>
  4626. #include <linux/init.h>
  4627. #include <linux/interrupt.h>
  4628. +#include <linux/clk.h>
  4629. #include <linux/completion.h>
  4630. #include <linux/device.h>
  4631. #include <linux/delay.h>
  4632. #include <linux/pagemap.h>
  4633. #include <linux/err.h>
  4634. +#include <linux/gpio.h>
  4635. #include <linux/leds.h>
  4636. #include <linux/scatterlist.h>
  4637. #include <linux/log2.h>
  4638. @@ -1504,6 +1506,43 @@
  4639. mmc_host_clk_release(host);
  4640. }
  4641. +static void mmc_card_power_up(struct mmc_host *host)
  4642. +{
  4643. + int i;
  4644. + struct gpio_desc **gds = host->card_reset_gpios;
  4645. +
  4646. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  4647. + if (gds[i]) {
  4648. + dev_dbg(host->parent, "Asserting reset line %d", i);
  4649. + gpiod_set_value(gds[i], 1);
  4650. + }
  4651. + }
  4652. +
  4653. + if (host->card_regulator) {
  4654. + dev_dbg(host->parent, "Enabling external regulator");
  4655. + if (regulator_enable(host->card_regulator))
  4656. + dev_err(host->parent, "Failed to enable external regulator");
  4657. + }
  4658. +
  4659. + if (host->card_clk) {
  4660. + dev_dbg(host->parent, "Enabling external clock");
  4661. + clk_prepare_enable(host->card_clk);
  4662. + }
  4663. +
4664. + /* 20ms delay to let clocks and power settle */
  4665. + mmc_delay(20);
  4666. +
  4667. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  4668. + if (gds[i]) {
  4669. + dev_dbg(host->parent, "Deasserting reset line %d", i);
  4670. + gpiod_set_value(gds[i], 0);
  4671. + }
  4672. + }
  4673. +
4674. + /* 20ms delay after reset release */
  4675. + mmc_delay(20);
  4676. +}
  4677. +
  4678. /*
  4679. * Apply power to the MMC stack. This is a two-stage process.
  4680. * First, we enable power to the card without the clock running.
  4681. @@ -1520,6 +1559,9 @@
  4682. if (host->ios.power_mode == MMC_POWER_ON)
  4683. return;
  4684. + /* Power up the card/module first, if needed */
  4685. + mmc_card_power_up(host);
  4686. +
  4687. mmc_host_clk_hold(host);
  4688. host->ios.vdd = fls(ocr) - 1;
  4689. diff -Nur linux-3.15-rc1.orig/drivers/mmc/core/host.c linux-3.15-rc1/drivers/mmc/core/host.c
  4690. --- linux-3.15-rc1.orig/drivers/mmc/core/host.c 2014-04-13 23:18:35.000000000 +0200
  4691. +++ linux-3.15-rc1/drivers/mmc/core/host.c 2014-04-25 14:11:13.535375148 +0200
  4692. @@ -12,14 +12,18 @@
  4693. * MMC host class device management
  4694. */
  4695. +#include <linux/kernel.h>
  4696. +#include <linux/clk.h>
  4697. #include <linux/device.h>
  4698. #include <linux/err.h>
  4699. +#include <linux/gpio/consumer.h>
  4700. #include <linux/idr.h>
  4701. #include <linux/of.h>
  4702. #include <linux/of_gpio.h>
  4703. #include <linux/pagemap.h>
  4704. #include <linux/export.h>
  4705. #include <linux/leds.h>
  4706. +#include <linux/regulator/consumer.h>
  4707. #include <linux/slab.h>
  4708. #include <linux/suspend.h>
  4709. @@ -457,6 +461,66 @@
  4710. EXPORT_SYMBOL(mmc_of_parse);
  4711. +static int mmc_of_parse_child(struct mmc_host *host)
  4712. +{
  4713. + struct device_node *np;
  4714. + struct clk *clk;
  4715. + int i;
  4716. +
  4717. + if (!host->parent || !host->parent->of_node)
  4718. + return 0;
  4719. +
  4720. + np = host->parent->of_node;
  4721. +
  4722. + host->card_regulator = regulator_get(host->parent, "card-external-vcc");
  4723. + if (IS_ERR(host->card_regulator)) {
  4724. + if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER)
  4725. + return PTR_ERR(host->card_regulator);
  4726. + host->card_regulator = NULL;
  4727. + }
  4728. +
  4729. + /* Parse card power/reset/clock control */
  4730. + if (of_find_property(np, "card-reset-gpios", NULL)) {
  4731. + struct gpio_desc *gpd;
  4732. + int level = 0;
  4733. +
  4734. + /*
  4735. + * If the regulator is enabled, then we can hold the
4737. + * card in reset with active-high resets. Otherwise,
  4737. + * hold the resets low.
  4738. + */
  4739. + if (host->card_regulator && regulator_is_enabled(host->card_regulator))
  4740. + level = 1;
  4741. +
  4742. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  4743. + gpd = devm_gpiod_get_index(host->parent, "card-reset", i);
  4744. + if (IS_ERR(gpd)) {
  4745. + if (PTR_ERR(gpd) == -EPROBE_DEFER)
  4746. + return PTR_ERR(gpd);
  4747. + break;
  4748. + }
  4749. + gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level);
  4750. + host->card_reset_gpios[i] = gpd;
  4751. + }
  4752. +
  4753. + gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios));
  4754. + if (!IS_ERR(gpd)) {
  4755. + dev_warn(host->parent, "More reset gpios than we can handle");
  4756. + gpiod_put(gpd);
  4757. + }
  4758. + }
  4759. +
  4760. + clk = of_clk_get_by_name(np, "card_ext_clock");
  4761. + if (IS_ERR(clk)) {
  4762. + if (PTR_ERR(clk) == -EPROBE_DEFER)
  4763. + return PTR_ERR(clk);
  4764. + clk = NULL;
  4765. + }
  4766. + host->card_clk = clk;
  4767. +
  4768. + return 0;
  4769. +}
  4770. +
  4771. /**
  4772. * mmc_alloc_host - initialise the per-host structure.
  4773. * @extra: sizeof private data structure
  4774. @@ -536,6 +600,10 @@
  4775. {
  4776. int err;
  4777. + err = mmc_of_parse_child(host);
  4778. + if (err)
  4779. + return err;
  4780. +
  4781. WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
  4782. !host->ops->enable_sdio_irq);
  4783. diff -Nur linux-3.15-rc1.orig/drivers/mmc/core/sdio_irq.c linux-3.15-rc1/drivers/mmc/core/sdio_irq.c
  4784. --- linux-3.15-rc1.orig/drivers/mmc/core/sdio_irq.c 2014-04-13 23:18:35.000000000 +0200
  4785. +++ linux-3.15-rc1/drivers/mmc/core/sdio_irq.c 2014-04-25 14:11:13.535375148 +0200
  4786. @@ -90,6 +90,15 @@
  4787. return ret;
  4788. }
  4789. +void sdio_run_irqs(struct mmc_host *host)
  4790. +{
  4791. + mmc_claim_host(host);
  4792. + host->sdio_irq_pending = true;
  4793. + process_sdio_pending_irqs(host);
  4794. + mmc_release_host(host);
  4795. +}
  4796. +EXPORT_SYMBOL_GPL(sdio_run_irqs);
  4797. +
  4798. static int sdio_irq_thread(void *_host)
  4799. {
  4800. struct mmc_host *host = _host;
  4801. @@ -189,14 +198,20 @@
  4802. WARN_ON(!host->claimed);
  4803. if (!host->sdio_irqs++) {
  4804. - atomic_set(&host->sdio_irq_thread_abort, 0);
  4805. - host->sdio_irq_thread =
  4806. - kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
  4807. - mmc_hostname(host));
  4808. - if (IS_ERR(host->sdio_irq_thread)) {
  4809. - int err = PTR_ERR(host->sdio_irq_thread);
  4810. - host->sdio_irqs--;
  4811. - return err;
  4812. + if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
  4813. + atomic_set(&host->sdio_irq_thread_abort, 0);
  4814. + host->sdio_irq_thread =
  4815. + kthread_run(sdio_irq_thread, host,
  4816. + "ksdioirqd/%s", mmc_hostname(host));
  4817. + if (IS_ERR(host->sdio_irq_thread)) {
  4818. + int err = PTR_ERR(host->sdio_irq_thread);
  4819. + host->sdio_irqs--;
  4820. + return err;
  4821. + }
  4822. + } else {
  4823. + mmc_host_clk_hold(host);
  4824. + host->ops->enable_sdio_irq(host, 1);
  4825. + mmc_host_clk_release(host);
  4826. }
  4827. }
  4828. @@ -211,8 +226,14 @@
  4829. BUG_ON(host->sdio_irqs < 1);
  4830. if (!--host->sdio_irqs) {
  4831. - atomic_set(&host->sdio_irq_thread_abort, 1);
  4832. - kthread_stop(host->sdio_irq_thread);
  4833. + if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
  4834. + atomic_set(&host->sdio_irq_thread_abort, 1);
  4835. + kthread_stop(host->sdio_irq_thread);
  4836. + } else {
  4837. + mmc_host_clk_hold(host);
  4838. + host->ops->enable_sdio_irq(host, 0);
  4839. + mmc_host_clk_release(host);
  4840. + }
  4841. }
  4842. return 0;
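With MMC_CAP2_SDIO_NOTHREAD set, claiming an SDIO IRQ no longer spawns ksdioirqd; the host driver keeps the card interrupt enabled through its enable_sdio_irq() op and dispatches the registered function handlers itself via sdio_run_irqs(). Because sdio_run_irqs() claims the host and may therefore sleep, it has to run in sleepable context, e.g. a threaded interrupt handler. A hedged sketch (the my_host type, register names and surrounding driver are assumptions):

/* Hypothetical host driver fragments -- a sketch, not part of the patch. */
static irqreturn_t my_host_irq(int irq, void *dev_id)
{
	struct my_host *host = dev_id;
	u32 status = readl(host->base + MY_INT_STATUS);	/* assumed register */

	writel(status, host->base + MY_INT_CLEAR);	/* assumed register */

	/* Defer the SDIO card interrupt to the threaded handler. */
	if (status & MY_INT_SDIO)
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

static irqreturn_t my_host_irq_thread(int irq, void *dev_id)
{
	struct my_host *host = dev_id;

	/* Runs the registered SDIO function handlers synchronously. */
	sdio_run_irqs(host->mmc);

	return IRQ_HANDLED;
}

/* In probe; the driver must also implement ops->enable_sdio_irq(): */
	mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
	ret = devm_request_threaded_irq(dev, irq, my_host_irq, my_host_irq_thread,
					IRQF_ONESHOT, "my-host", host);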
  4843. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/dw_mmc.c linux-3.15-rc1/drivers/mmc/host/dw_mmc.c
  4844. --- linux-3.15-rc1.orig/drivers/mmc/host/dw_mmc.c 2014-04-13 23:18:35.000000000 +0200
  4845. +++ linux-3.15-rc1/drivers/mmc/host/dw_mmc.c 2014-04-25 14:11:13.539375164 +0200
  4846. @@ -2140,6 +2140,8 @@
  4847. if (!mmc)
  4848. return -ENOMEM;
  4849. + mmc_of_parse(mmc);
  4850. +
  4851. slot = mmc_priv(mmc);
  4852. slot->id = id;
  4853. slot->mmc = mmc;
  4854. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/Kconfig linux-3.15-rc1/drivers/mmc/host/Kconfig
  4855. --- linux-3.15-rc1.orig/drivers/mmc/host/Kconfig 2014-04-13 23:18:35.000000000 +0200
  4856. +++ linux-3.15-rc1/drivers/mmc/host/Kconfig 2014-04-25 14:11:13.539375164 +0200
  4857. @@ -25,8 +25,7 @@
  4858. If unsure, say N.
  4859. config MMC_SDHCI
  4860. - tristate "Secure Digital Host Controller Interface support"
  4861. - depends on HAS_DMA
  4862. + tristate
  4863. help
  4864. This selects the generic Secure Digital Host Controller Interface.
  4865. It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
  4866. @@ -59,7 +58,8 @@
  4867. config MMC_SDHCI_PCI
  4868. tristate "SDHCI support on PCI bus"
  4869. - depends on MMC_SDHCI && PCI
  4870. + depends on PCI && HAS_DMA
  4871. + select MMC_SDHCI
  4872. help
  4873. This selects the PCI Secure Digital Host Controller Interface.
  4874. Most controllers found today are PCI devices.
  4875. @@ -83,7 +83,8 @@
  4876. config MMC_SDHCI_ACPI
  4877. tristate "SDHCI support for ACPI enumerated SDHCI controllers"
  4878. - depends on MMC_SDHCI && ACPI
  4879. + depends on ACPI && HAS_DMA
  4880. + select MMC_SDHCI
  4881. help
  4882. This selects support for ACPI enumerated SDHCI controllers,
  4883. identified by ACPI Compatibility ID PNP0D40 or specific
  4884. @@ -94,8 +95,8 @@
  4885. If unsure, say N.
  4886. config MMC_SDHCI_PLTFM
  4887. - tristate "SDHCI platform and OF driver helper"
  4888. - depends on MMC_SDHCI
  4889. + tristate
  4890. + select MMC_SDHCI
  4891. help
  4892. This selects the common helper functions support for Secure Digital
  4893. Host Controller Interface based platform and OF drivers.
  4894. @@ -106,8 +107,8 @@
  4895. config MMC_SDHCI_OF_ARASAN
  4896. tristate "SDHCI OF support for the Arasan SDHCI controllers"
  4897. - depends on MMC_SDHCI_PLTFM
  4898. - depends on OF
  4899. + depends on OF && HAS_DMA
  4900. + select MMC_SDHCI_PLTFM
  4901. help
  4902. This selects the Arasan Secure Digital Host Controller Interface
  4903. (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
  4904. @@ -118,9 +119,9 @@
  4905. config MMC_SDHCI_OF_ESDHC
  4906. tristate "SDHCI OF support for the Freescale eSDHC controller"
  4907. - depends on MMC_SDHCI_PLTFM
  4908. - depends on PPC_OF
  4909. + depends on PPC_OF && HAS_DMA
  4910. select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
  4911. + select MMC_SDHCI_PLTFM
  4912. help
  4913. This selects the Freescale eSDHC controller support.
  4914. @@ -130,9 +131,9 @@
  4915. config MMC_SDHCI_OF_HLWD
  4916. tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
  4917. - depends on MMC_SDHCI_PLTFM
  4918. - depends on PPC_OF
  4919. + depends on PPC_OF && HAS_DMA
  4920. select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
  4921. + select MMC_SDHCI_PLTFM
  4922. help
  4923. This selects the Secure Digital Host Controller Interface (SDHCI)
  4924. found in the "Hollywood" chipset of the Nintendo Wii video game
  4925. @@ -144,8 +145,8 @@
  4926. config MMC_SDHCI_CNS3XXX
  4927. tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
  4928. - depends on ARCH_CNS3XXX
  4929. - depends on MMC_SDHCI_PLTFM
  4930. + depends on ARCH_CNS3XXX && HAS_DMA
  4931. + select MMC_SDHCI_PLTFM
  4932. help
  4933. This selects the SDHCI support for CNS3xxx System-on-Chip devices.
  4934. @@ -155,9 +156,9 @@
  4935. config MMC_SDHCI_ESDHC_IMX
  4936. tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
  4937. - depends on ARCH_MXC
  4938. - depends on MMC_SDHCI_PLTFM
  4939. + depends on ARCH_MXC && HAS_DMA
  4940. select MMC_SDHCI_IO_ACCESSORS
  4941. + select MMC_SDHCI_PLTFM
  4942. help
  4943. This selects the Freescale eSDHC/uSDHC controller support
  4944. found on i.MX25, i.MX35 i.MX5x and i.MX6x.
  4945. @@ -168,9 +169,9 @@
  4946. config MMC_SDHCI_DOVE
  4947. tristate "SDHCI support on Marvell's Dove SoC"
  4948. - depends on ARCH_DOVE
  4949. - depends on MMC_SDHCI_PLTFM
  4950. + depends on ARCH_DOVE && HAS_DMA
  4951. select MMC_SDHCI_IO_ACCESSORS
  4952. + select MMC_SDHCI_PLTFM
  4953. help
  4954. This selects the Secure Digital Host Controller Interface in
  4955. Marvell's Dove SoC.
  4956. @@ -181,9 +182,9 @@
  4957. config MMC_SDHCI_TEGRA
  4958. tristate "SDHCI platform support for the Tegra SD/MMC Controller"
  4959. - depends on ARCH_TEGRA
  4960. - depends on MMC_SDHCI_PLTFM
  4961. + depends on ARCH_TEGRA && HAS_DMA
  4962. select MMC_SDHCI_IO_ACCESSORS
  4963. + select MMC_SDHCI_PLTFM
  4964. help
  4965. This selects the Tegra SD/MMC controller. If you have a Tegra
  4966. platform with SD or MMC devices, say Y or M here.
  4967. @@ -192,7 +193,8 @@
  4968. config MMC_SDHCI_S3C
  4969. tristate "SDHCI support on Samsung S3C SoC"
  4970. - depends on MMC_SDHCI && PLAT_SAMSUNG
  4971. + depends on PLAT_SAMSUNG && HAS_DMA
  4972. + select MMC_SDHCI
  4973. help
  4974. This selects the Secure Digital Host Controller Interface (SDHCI)
  4975. often referrered to as the HSMMC block in some of the Samsung S3C
  4976. @@ -204,8 +206,8 @@
  4977. config MMC_SDHCI_SIRF
  4978. tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
  4979. - depends on ARCH_SIRF
  4980. - depends on MMC_SDHCI_PLTFM
  4981. + depends on ARCH_SIRF && HAS_DMA
  4982. + select MMC_SDHCI_PLTFM
  4983. help
  4984. This selects the SDHCI support for SiRF System-on-Chip devices.
  4985. @@ -215,8 +217,7 @@
  4986. config MMC_SDHCI_PXAV3
  4987. tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
  4988. - depends on CLKDEV_LOOKUP
  4989. - select MMC_SDHCI
  4990. + depends on CLKDEV_LOOKUP && HAS_DMA
  4991. select MMC_SDHCI_PLTFM
  4992. default CPU_MMP2
  4993. help
  4994. @@ -228,8 +229,7 @@
  4995. config MMC_SDHCI_PXAV2
  4996. tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
  4997. - depends on CLKDEV_LOOKUP
  4998. - select MMC_SDHCI
  4999. + depends on CLKDEV_LOOKUP && HAS_DMA
  5000. select MMC_SDHCI_PLTFM
  5001. default CPU_PXA910
  5002. help
  5003. @@ -241,7 +241,8 @@
  5004. config MMC_SDHCI_SPEAR
  5005. tristate "SDHCI support on ST SPEAr platform"
  5006. - depends on MMC_SDHCI && PLAT_SPEAR
  5007. + depends on PLAT_SPEAR && HAS_DMA
  5008. + select MMC_SDHCI
  5009. help
  5010. This selects the Secure Digital Host Controller Interface (SDHCI)
  5011. often referrered to as the HSMMC block in some of the ST SPEAR range
  5012. @@ -263,7 +264,7 @@
  5013. config MMC_SDHCI_BCM_KONA
  5014. tristate "SDHCI support on Broadcom KONA platform"
  5015. - depends on ARCH_BCM_MOBILE
  5016. + depends on ARCH_BCM_MOBILE && HAS_DMA
  5017. select MMC_SDHCI_PLTFM
  5018. help
  5019. This selects the Broadcom Kona Secure Digital Host Controller
  5020. @@ -274,9 +275,9 @@
  5021. config MMC_SDHCI_BCM2835
  5022. tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
  5023. - depends on ARCH_BCM2835
  5024. - depends on MMC_SDHCI_PLTFM
  5025. + depends on ARCH_BCM2835 && HAS_DMA
  5026. select MMC_SDHCI_IO_ACCESSORS
  5027. + select MMC_SDHCI_PLTFM
  5028. help
  5029. This selects the BCM2835 SD/MMC controller. If you have a BCM2835
  5030. platform with SD or MMC devices, say Y or M here.
  5031. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-acpi.c linux-3.15-rc1/drivers/mmc/host/sdhci-acpi.c
  5032. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-acpi.c 2014-04-13 23:18:35.000000000 +0200
  5033. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-acpi.c 2014-04-25 14:11:13.539375164 +0200
  5034. @@ -102,11 +102,19 @@
  5035. }
  5036. static const struct sdhci_ops sdhci_acpi_ops_dflt = {
  5037. + .set_clock = sdhci_set_clock,
  5038. .enable_dma = sdhci_acpi_enable_dma,
  5039. + .set_bus_width = sdhci_set_bus_width,
  5040. + .reset = sdhci_reset,
  5041. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  5042. };
  5043. static const struct sdhci_ops sdhci_acpi_ops_int = {
  5044. + .set_clock = sdhci_set_clock,
  5045. .enable_dma = sdhci_acpi_enable_dma,
  5046. + .set_bus_width = sdhci_set_bus_width,
  5047. + .reset = sdhci_reset,
  5048. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  5049. .hw_reset = sdhci_acpi_int_hw_reset,
  5050. };
  5051. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-bcm2835.c linux-3.15-rc1/drivers/mmc/host/sdhci-bcm2835.c
  5052. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-bcm2835.c 2014-04-13 23:18:35.000000000 +0200
  5053. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-bcm2835.c 2014-04-25 14:11:13.539375164 +0200
  5054. @@ -131,8 +131,12 @@
  5055. .read_l = bcm2835_sdhci_readl,
  5056. .read_w = bcm2835_sdhci_readw,
  5057. .read_b = bcm2835_sdhci_readb,
  5058. + .set_clock = sdhci_set_clock,
  5059. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  5060. .get_min_clock = bcm2835_sdhci_get_min_clock,
  5061. + .set_bus_width = sdhci_set_bus_width,
  5062. + .reset = sdhci_reset,
  5063. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  5064. };
  5065. static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
  5066. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-bcm-kona.c linux-3.15-rc1/drivers/mmc/host/sdhci-bcm-kona.c
  5067. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-bcm-kona.c 2014-04-13 23:18:35.000000000 +0200
  5068. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-bcm-kona.c 2014-04-25 14:11:13.539375164 +0200
  5069. @@ -206,9 +206,13 @@
  5070. }
  5071. static struct sdhci_ops sdhci_bcm_kona_ops = {
  5072. + .set_clock = sdhci_set_clock,
  5073. .get_max_clock = sdhci_bcm_kona_get_max_clk,
  5074. .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock,
  5075. .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
  5076. + .set_bus_width = sdhci_set_bus_width,
  5077. + .reset = sdhci_reset,
  5078. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  5079. .card_event = sdhci_bcm_kona_card_event,
  5080. };
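The three conversions above illustrate the new contract: sdhci_reset(), sdhci_set_clock(), sdhci_set_bus_width() and sdhci_set_uhs_signaling() are exported library functions, and each sdhci driver must populate the corresponding mandatory ops, either with these defaults or with its own wrappers. A minimal sketch for a hypothetical platform driver; only the ops wiring is meaningful, the quirks value is a placeholder:

/* Hypothetical sdhci glue driver -- a sketch, not part of the patch. */
static const struct sdhci_ops my_sdhci_ops = {
	.set_clock	   = sdhci_set_clock,		/* library default */
	.set_bus_width	   = sdhci_set_bus_width,	/* library default */
	.reset		   = sdhci_reset,		/* library default */
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_pltfm_data my_sdhci_pdata = {
	.ops = &my_sdhci_ops,
	/* .quirks = ..., as required by the controller */
};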
  5081. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci.c linux-3.15-rc1/drivers/mmc/host/sdhci.c
  5082. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci.c 2014-04-13 23:18:35.000000000 +0200
  5083. +++ linux-3.15-rc1/drivers/mmc/host/sdhci.c 2014-04-25 14:11:13.583375360 +0200
  5084. @@ -44,6 +44,8 @@
  5085. #define MAX_TUNING_LOOP 40
  5086. +#define ADMA_SIZE ((128 * 2 + 1) * 4)
  5087. +
  5088. static unsigned int debug_quirks = 0;
  5089. static unsigned int debug_quirks2;
  5090. @@ -131,43 +133,26 @@
  5091. * *
  5092. \*****************************************************************************/
  5093. -static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
  5094. -{
  5095. - u32 ier;
  5096. -
  5097. - ier = sdhci_readl(host, SDHCI_INT_ENABLE);
  5098. - ier &= ~clear;
  5099. - ier |= set;
  5100. - sdhci_writel(host, ier, SDHCI_INT_ENABLE);
  5101. - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
  5102. -}
  5103. -
  5104. -static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
  5105. -{
  5106. - sdhci_clear_set_irqs(host, 0, irqs);
  5107. -}
  5108. -
  5109. -static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
  5110. -{
  5111. - sdhci_clear_set_irqs(host, irqs, 0);
  5112. -}
  5113. -
  5114. static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
  5115. {
  5116. - u32 present, irqs;
  5117. + u32 present;
  5118. if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
  5119. (host->mmc->caps & MMC_CAP_NONREMOVABLE))
  5120. return;
  5121. - present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5122. - SDHCI_CARD_PRESENT;
  5123. - irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
  5124. + if (enable) {
  5125. + present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5126. + SDHCI_CARD_PRESENT;
  5127. - if (enable)
  5128. - sdhci_unmask_irqs(host, irqs);
  5129. - else
  5130. - sdhci_mask_irqs(host, irqs);
  5131. + host->ier |= present ? SDHCI_INT_CARD_REMOVE :
  5132. + SDHCI_INT_CARD_INSERT;
  5133. + } else {
  5134. + host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
  5135. + }
  5136. +
  5137. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5138. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5139. }
  5140. static void sdhci_enable_card_detection(struct sdhci_host *host)
  5141. @@ -180,22 +165,9 @@
  5142. sdhci_set_card_detection(host, false);
  5143. }
  5144. -static void sdhci_reset(struct sdhci_host *host, u8 mask)
  5145. +void sdhci_reset(struct sdhci_host *host, u8 mask)
  5146. {
  5147. unsigned long timeout;
  5148. - u32 uninitialized_var(ier);
  5149. -
  5150. - if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
  5151. - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5152. - SDHCI_CARD_PRESENT))
  5153. - return;
  5154. - }
  5155. -
  5156. - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
  5157. - ier = sdhci_readl(host, SDHCI_INT_ENABLE);
  5158. -
  5159. - if (host->ops->platform_reset_enter)
  5160. - host->ops->platform_reset_enter(host, mask);
  5161. sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
  5162. @@ -220,16 +192,27 @@
  5163. timeout--;
  5164. mdelay(1);
  5165. }
  5166. +}
  5167. +EXPORT_SYMBOL_GPL(sdhci_reset);
  5168. +
  5169. +static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
  5170. +{
  5171. + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
  5172. + if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5173. + SDHCI_CARD_PRESENT))
  5174. + return;
  5175. + }
  5176. - if (host->ops->platform_reset_exit)
  5177. - host->ops->platform_reset_exit(host, mask);
  5178. + host->ops->reset(host, mask);
  5179. - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
  5180. - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
  5181. + if (mask & SDHCI_RESET_ALL) {
  5182. + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
  5183. + if (host->ops->enable_dma)
  5184. + host->ops->enable_dma(host);
  5185. + }
  5186. - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
  5187. - if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
  5188. - host->ops->enable_dma(host);
5189. + /* Resetting the controller clears many settings, including preset enable */
  5190. + host->preset_enabled = false;
  5191. }
  5192. }
  5193. @@ -238,15 +221,18 @@
  5194. static void sdhci_init(struct sdhci_host *host, int soft)
  5195. {
  5196. if (soft)
  5197. - sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
  5198. + sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
  5199. else
  5200. - sdhci_reset(host, SDHCI_RESET_ALL);
  5201. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  5202. +
  5203. + host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
  5204. + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
  5205. + SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
  5206. + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
  5207. + SDHCI_INT_RESPONSE;
  5208. - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
  5209. - SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
  5210. - SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
  5211. - SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
  5212. - SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
  5213. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5214. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5215. if (soft) {
  5216. /* force clock reconfiguration */
  5217. @@ -502,11 +488,6 @@
  5218. else
  5219. direction = DMA_TO_DEVICE;
  5220. - /*
  5221. - * The ADMA descriptor table is mapped further down as we
  5222. - * need to fill it with data first.
  5223. - */
  5224. -
  5225. host->align_addr = dma_map_single(mmc_dev(host->mmc),
  5226. host->align_buffer, 128 * 4, direction);
  5227. if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
  5228. @@ -567,7 +548,7 @@
  5229. * If this triggers then we have a calculation bug
  5230. * somewhere. :/
  5231. */
  5232. - WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
  5233. + WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
  5234. }
  5235. if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
  5236. @@ -595,17 +576,8 @@
  5237. host->align_addr, 128 * 4, direction);
  5238. }
  5239. - host->adma_addr = dma_map_single(mmc_dev(host->mmc),
  5240. - host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
  5241. - if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
  5242. - goto unmap_entries;
  5243. - BUG_ON(host->adma_addr & 0x3);
  5244. -
  5245. return 0;
  5246. -unmap_entries:
  5247. - dma_unmap_sg(mmc_dev(host->mmc), data->sg,
  5248. - data->sg_len, direction);
  5249. unmap_align:
  5250. dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
  5251. 128 * 4, direction);
  5252. @@ -623,19 +595,25 @@
  5253. u8 *align;
  5254. char *buffer;
  5255. unsigned long flags;
  5256. + bool has_unaligned;
  5257. if (data->flags & MMC_DATA_READ)
  5258. direction = DMA_FROM_DEVICE;
  5259. else
  5260. direction = DMA_TO_DEVICE;
  5261. - dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
  5262. - (128 * 2 + 1) * 4, DMA_TO_DEVICE);
  5263. -
  5264. dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
  5265. 128 * 4, direction);
  5266. - if (data->flags & MMC_DATA_READ) {
  5267. + /* Do a quick scan of the SG list for any unaligned mappings */
  5268. + has_unaligned = false;
  5269. + for_each_sg(data->sg, sg, host->sg_count, i)
  5270. + if (sg_dma_address(sg) & 3) {
  5271. + has_unaligned = true;
  5272. + break;
  5273. + }
  5274. +
  5275. + if (has_unaligned && data->flags & MMC_DATA_READ) {
  5276. dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
  5277. data->sg_len, direction);
  5278. @@ -721,9 +699,12 @@
  5279. u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
  5280. if (host->flags & SDHCI_REQ_USE_DMA)
  5281. - sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
  5282. + host->ier = (host->ier & ~pio_irqs) | dma_irqs;
  5283. else
  5284. - sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
  5285. + host->ier = (host->ier & ~dma_irqs) | pio_irqs;
  5286. +
  5287. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5288. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5289. }
  5290. static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
  5291. @@ -976,8 +957,8 @@
  5292. * upon error conditions.
  5293. */
  5294. if (data->error) {
  5295. - sdhci_reset(host, SDHCI_RESET_CMD);
  5296. - sdhci_reset(host, SDHCI_RESET_DATA);
  5297. + sdhci_do_reset(host, SDHCI_RESET_CMD);
  5298. + sdhci_do_reset(host, SDHCI_RESET_DATA);
  5299. }
  5300. sdhci_send_command(host, data->stop);
  5301. @@ -1107,24 +1088,23 @@
  5302. static u16 sdhci_get_preset_value(struct sdhci_host *host)
  5303. {
  5304. - u16 ctrl, preset = 0;
  5305. + u16 preset = 0;
  5306. - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5307. -
  5308. - switch (ctrl & SDHCI_CTRL_UHS_MASK) {
  5309. - case SDHCI_CTRL_UHS_SDR12:
  5310. + switch (host->timing) {
  5311. + case MMC_TIMING_UHS_SDR12:
  5312. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
  5313. break;
  5314. - case SDHCI_CTRL_UHS_SDR25:
  5315. + case MMC_TIMING_UHS_SDR25:
  5316. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
  5317. break;
  5318. - case SDHCI_CTRL_UHS_SDR50:
  5319. + case MMC_TIMING_UHS_SDR50:
  5320. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
  5321. break;
  5322. - case SDHCI_CTRL_UHS_SDR104:
  5323. + case MMC_TIMING_UHS_SDR104:
  5324. + case MMC_TIMING_MMC_HS200:
  5325. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
  5326. break;
  5327. - case SDHCI_CTRL_UHS_DDR50:
  5328. + case MMC_TIMING_UHS_DDR50:
  5329. preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
  5330. break;
  5331. default:
  5332. @@ -1136,32 +1116,22 @@
  5333. return preset;
  5334. }
  5335. -static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
  5336. +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
  5337. {
  5338. int div = 0; /* Initialized for compiler warning */
  5339. int real_div = div, clk_mul = 1;
  5340. u16 clk = 0;
  5341. unsigned long timeout;
  5342. - if (clock && clock == host->clock)
  5343. - return;
  5344. -
  5345. host->mmc->actual_clock = 0;
  5346. - if (host->ops->set_clock) {
  5347. - host->ops->set_clock(host, clock);
  5348. - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
  5349. - return;
  5350. - }
  5351. -
  5352. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  5353. if (clock == 0)
  5354. - goto out;
  5355. + return;
  5356. if (host->version >= SDHCI_SPEC_300) {
  5357. - if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
  5358. - SDHCI_CTRL_PRESET_VAL_ENABLE) {
  5359. + if (host->preset_enabled) {
  5360. u16 pre_val;
  5361. clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
  5362. @@ -1247,26 +1217,16 @@
  5363. clk |= SDHCI_CLOCK_CARD_EN;
  5364. sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  5365. -
  5366. -out:
  5367. - host->clock = clock;
  5368. -}
  5369. -
  5370. -static inline void sdhci_update_clock(struct sdhci_host *host)
  5371. -{
  5372. - unsigned int clock;
  5373. -
  5374. - clock = host->clock;
  5375. - host->clock = 0;
  5376. - sdhci_set_clock(host, clock);
  5377. }
  5378. +EXPORT_SYMBOL_GPL(sdhci_set_clock);
  5379. -static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
  5380. +static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
  5381. + unsigned short vdd)
  5382. {
  5383. u8 pwr = 0;
  5384. - if (power != (unsigned short)-1) {
  5385. - switch (1 << power) {
  5386. + if (mode != MMC_POWER_OFF) {
  5387. + switch (1 << vdd) {
  5388. case MMC_VDD_165_195:
  5389. pwr = SDHCI_POWER_180;
  5390. break;
  5391. @@ -1284,7 +1244,7 @@
  5392. }
  5393. if (host->pwr == pwr)
  5394. - return -1;
  5395. + return;
  5396. host->pwr = pwr;
  5397. @@ -1292,38 +1252,43 @@
  5398. sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
  5399. if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  5400. sdhci_runtime_pm_bus_off(host);
  5401. - return 0;
  5402. - }
  5403. -
  5404. - /*
  5405. - * Spec says that we should clear the power reg before setting
  5406. - * a new value. Some controllers don't seem to like this though.
  5407. - */
  5408. - if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
  5409. - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
  5410. + vdd = 0;
  5411. + } else {
  5412. + /*
  5413. + * Spec says that we should clear the power reg before setting
  5414. + * a new value. Some controllers don't seem to like this though.
  5415. + */
  5416. + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
  5417. + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
  5418. - /*
  5419. - * At least the Marvell CaFe chip gets confused if we set the voltage
  5420. - * and set turn on power at the same time, so set the voltage first.
  5421. - */
  5422. - if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
  5423. - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5424. + /*
  5425. + * At least the Marvell CaFe chip gets confused if we set the
  5426. + * voltage and set turn on power at the same time, so set the
  5427. + * voltage first.
  5428. + */
  5429. + if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
  5430. + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5431. - pwr |= SDHCI_POWER_ON;
  5432. + pwr |= SDHCI_POWER_ON;
  5433. - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5434. + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5435. - if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  5436. - sdhci_runtime_pm_bus_on(host);
  5437. + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  5438. + sdhci_runtime_pm_bus_on(host);
  5439. - /*
  5440. - * Some controllers need an extra 10ms delay of 10ms before they
  5441. - * can apply clock after applying power
  5442. - */
  5443. - if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
  5444. - mdelay(10);
  5445. + /*
5446. + * Some controllers need an extra 10ms delay before they
5447. + * can apply clock after applying power
  5448. + */
  5449. + if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
  5450. + mdelay(10);
  5451. + }
  5452. - return power;
  5453. + if (host->vmmc) {
  5454. + spin_unlock_irq(&host->lock);
  5455. + mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd);
  5456. + spin_lock_irq(&host->lock);
  5457. + }
  5458. }
  5459. /*****************************************************************************\
  5460. @@ -1427,10 +1392,52 @@
  5461. spin_unlock_irqrestore(&host->lock, flags);
  5462. }
  5463. +void sdhci_set_bus_width(struct sdhci_host *host, int width)
  5464. +{
  5465. + u8 ctrl;
  5466. +
  5467. + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
  5468. + if (width == MMC_BUS_WIDTH_8) {
  5469. + ctrl &= ~SDHCI_CTRL_4BITBUS;
  5470. + if (host->version >= SDHCI_SPEC_300)
  5471. + ctrl |= SDHCI_CTRL_8BITBUS;
  5472. + } else {
  5473. + if (host->version >= SDHCI_SPEC_300)
  5474. + ctrl &= ~SDHCI_CTRL_8BITBUS;
  5475. + if (width == MMC_BUS_WIDTH_4)
  5476. + ctrl |= SDHCI_CTRL_4BITBUS;
  5477. + else
  5478. + ctrl &= ~SDHCI_CTRL_4BITBUS;
  5479. + }
  5480. + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5481. +}
  5482. +EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
  5483. +
  5484. +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
  5485. +{
  5486. + u16 ctrl_2;
  5487. +
  5488. + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5489. + /* Select Bus Speed Mode for host */
  5490. + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
  5491. + if ((timing == MMC_TIMING_MMC_HS200) ||
  5492. + (timing == MMC_TIMING_UHS_SDR104))
  5493. + ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
  5494. + else if (timing == MMC_TIMING_UHS_SDR12)
  5495. + ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
  5496. + else if (timing == MMC_TIMING_UHS_SDR25)
  5497. + ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
  5498. + else if (timing == MMC_TIMING_UHS_SDR50)
  5499. + ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
  5500. + else if (timing == MMC_TIMING_UHS_DDR50)
  5501. + ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
  5502. + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
  5503. +}
  5504. +EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
  5505. +
  5506. static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
  5507. {
  5508. unsigned long flags;
  5509. - int vdd_bit = -1;
  5510. u8 ctrl;
  5511. spin_lock_irqsave(&host->lock, flags);
  5512. @@ -1456,45 +1463,17 @@
  5513. !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
  5514. sdhci_enable_preset_value(host, false);
  5515. - sdhci_set_clock(host, ios->clock);
  5516. -
  5517. - if (ios->power_mode == MMC_POWER_OFF)
  5518. - vdd_bit = sdhci_set_power(host, -1);
  5519. - else
  5520. - vdd_bit = sdhci_set_power(host, ios->vdd);
  5521. -
  5522. - if (host->vmmc && vdd_bit != -1) {
  5523. - spin_unlock_irqrestore(&host->lock, flags);
  5524. - mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
  5525. - spin_lock_irqsave(&host->lock, flags);
  5526. + if (!ios->clock || ios->clock != host->clock) {
  5527. + host->ops->set_clock(host, ios->clock);
  5528. + host->clock = ios->clock;
  5529. }
  5530. + sdhci_set_power(host, ios->power_mode, ios->vdd);
  5531. +
  5532. if (host->ops->platform_send_init_74_clocks)
  5533. host->ops->platform_send_init_74_clocks(host, ios->power_mode);
  5534. - /*
  5535. - * If your platform has 8-bit width support but is not a v3 controller,
  5536. - * or if it requires special setup code, you should implement that in
  5537. - * platform_bus_width().
  5538. - */
  5539. - if (host->ops->platform_bus_width) {
  5540. - host->ops->platform_bus_width(host, ios->bus_width);
  5541. - } else {
  5542. - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
  5543. - if (ios->bus_width == MMC_BUS_WIDTH_8) {
  5544. - ctrl &= ~SDHCI_CTRL_4BITBUS;
  5545. - if (host->version >= SDHCI_SPEC_300)
  5546. - ctrl |= SDHCI_CTRL_8BITBUS;
  5547. - } else {
  5548. - if (host->version >= SDHCI_SPEC_300)
  5549. - ctrl &= ~SDHCI_CTRL_8BITBUS;
  5550. - if (ios->bus_width == MMC_BUS_WIDTH_4)
  5551. - ctrl |= SDHCI_CTRL_4BITBUS;
  5552. - else
  5553. - ctrl &= ~SDHCI_CTRL_4BITBUS;
  5554. - }
  5555. - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5556. - }
  5557. + host->ops->set_bus_width(host, ios->bus_width);
  5558. ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
  5559. @@ -1516,13 +1495,13 @@
  5560. (ios->timing == MMC_TIMING_UHS_SDR25))
  5561. ctrl |= SDHCI_CTRL_HISPD;
  5562. - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5563. - if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
  5564. + if (!host->preset_enabled) {
  5565. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5566. /*
  5567. * We only need to set Driver Strength if the
  5568. * preset value enable is not set.
  5569. */
  5570. + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5571. ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
  5572. if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
  5573. ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
  5574. @@ -1546,34 +1525,11 @@
  5575. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5576. /* Re-enable SD Clock */
  5577. - sdhci_update_clock(host);
  5578. + host->ops->set_clock(host, host->clock);
  5579. }
  5580. -
  5581. - /* Reset SD Clock Enable */
  5582. - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
  5583. - clk &= ~SDHCI_CLOCK_CARD_EN;
  5584. - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  5585. -
  5586. - if (host->ops->set_uhs_signaling)
  5587. - host->ops->set_uhs_signaling(host, ios->timing);
  5588. - else {
  5589. - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5590. - /* Select Bus Speed Mode for host */
  5591. - ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
  5592. - if ((ios->timing == MMC_TIMING_MMC_HS200) ||
  5593. - (ios->timing == MMC_TIMING_UHS_SDR104))
  5594. - ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
  5595. - else if (ios->timing == MMC_TIMING_UHS_SDR12)
  5596. - ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
  5597. - else if (ios->timing == MMC_TIMING_UHS_SDR25)
  5598. - ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
  5599. - else if (ios->timing == MMC_TIMING_UHS_SDR50)
  5600. - ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
  5601. - else if (ios->timing == MMC_TIMING_UHS_DDR50)
  5602. - ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
  5603. - sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
  5604. - }
  5605. + host->ops->set_uhs_signaling(host, ios->timing);
  5606. + host->timing = ios->timing;
  5607. if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
  5608. ((ios->timing == MMC_TIMING_UHS_SDR12) ||
  5609. @@ -1588,9 +1544,6 @@
  5610. ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
  5611. >> SDHCI_PRESET_DRV_SHIFT;
  5612. }
  5613. -
  5614. - /* Re-enable SD Clock */
  5615. - sdhci_update_clock(host);
  5616. } else
  5617. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5618. @@ -1600,7 +1553,7 @@
  5619. * it on each ios seems to solve the problem.
  5620. */
  5621. if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
  5622. - sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  5623. + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  5624. mmiowb();
  5625. spin_unlock_irqrestore(&host->lock, flags);
  5626. @@ -1709,24 +1662,16 @@
  5627. static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
  5628. {
  5629. - if (host->flags & SDHCI_DEVICE_DEAD)
  5630. - goto out;
  5631. -
  5632. - if (enable)
  5633. - host->flags |= SDHCI_SDIO_IRQ_ENABLED;
  5634. - else
  5635. - host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
  5636. -
  5637. - /* SDIO IRQ will be enabled as appropriate in runtime resume */
  5638. - if (host->runtime_suspended)
  5639. - goto out;
  5640. + if (!(host->flags & SDHCI_DEVICE_DEAD)) {
  5641. + if (enable)
  5642. + host->ier |= SDHCI_INT_CARD_INT;
  5643. + else
  5644. + host->ier &= ~SDHCI_INT_CARD_INT;
  5645. - if (enable)
  5646. - sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
  5647. - else
  5648. - sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
  5649. -out:
  5650. - mmiowb();
  5651. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5652. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5653. + mmiowb();
  5654. + }
  5655. }
  5656. static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
  5657. @@ -1734,9 +1679,18 @@
  5658. struct sdhci_host *host = mmc_priv(mmc);
  5659. unsigned long flags;
  5660. + sdhci_runtime_pm_get(host);
  5661. +
  5662. spin_lock_irqsave(&host->lock, flags);
  5663. + if (enable)
  5664. + host->flags |= SDHCI_SDIO_IRQ_ENABLED;
  5665. + else
  5666. + host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
  5667. +
  5668. sdhci_enable_sdio_irq_nolock(host, enable);
  5669. spin_unlock_irqrestore(&host->lock, flags);
  5670. +
  5671. + sdhci_runtime_pm_put(host);
  5672. }
  5673. static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
  5674. @@ -1798,9 +1752,6 @@
  5675. ctrl |= SDHCI_CTRL_VDD_180;
  5676. sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5677. - /* Wait for 5ms */
  5678. - usleep_range(5000, 5500);
  5679. -
  5680. /* 1.8V regulator output should be stable within 5 ms */
  5681. ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5682. if (ctrl & SDHCI_CTRL_VDD_180)
  5683. @@ -1855,22 +1806,16 @@
  5684. static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
  5685. {
  5686. - struct sdhci_host *host;
  5687. + struct sdhci_host *host = mmc_priv(mmc);
  5688. u16 ctrl;
  5689. - u32 ier;
  5690. int tuning_loop_counter = MAX_TUNING_LOOP;
  5691. unsigned long timeout;
  5692. int err = 0;
  5693. - bool requires_tuning_nonuhs = false;
  5694. unsigned long flags;
  5695. - host = mmc_priv(mmc);
  5696. -
  5697. sdhci_runtime_pm_get(host);
  5698. spin_lock_irqsave(&host->lock, flags);
  5699. - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5700. -
  5701. /*
  5702. * The Host Controller needs tuning only in case of SDR104 mode
  5703. * and for SDR50 mode when Use Tuning for SDR50 is set in the
  5704. @@ -1878,15 +1823,18 @@
  5705. * If the Host Controller supports the HS200 mode then the
  5706. * tuning function has to be executed.
  5707. */
  5708. - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
  5709. - (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
  5710. - host->flags & SDHCI_SDR104_NEEDS_TUNING))
  5711. - requires_tuning_nonuhs = true;
  5712. -
  5713. - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
  5714. - requires_tuning_nonuhs)
  5715. - ctrl |= SDHCI_CTRL_EXEC_TUNING;
  5716. - else {
  5717. + switch (host->timing) {
  5718. + case MMC_TIMING_MMC_HS200:
  5719. + case MMC_TIMING_UHS_SDR104:
  5720. + break;
  5721. +
  5722. + case MMC_TIMING_UHS_SDR50:
  5723. + if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
  5724. + host->flags & SDHCI_SDR104_NEEDS_TUNING)
  5725. + break;
  5726. + /* FALLTHROUGH */
  5727. +
  5728. + default:
  5729. spin_unlock_irqrestore(&host->lock, flags);
  5730. sdhci_runtime_pm_put(host);
  5731. return 0;
  5732. @@ -1899,6 +1847,8 @@
  5733. return err;
  5734. }
  5735. + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5736. + ctrl |= SDHCI_CTRL_EXEC_TUNING;
  5737. sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5738. /*
  5739. @@ -1911,8 +1861,8 @@
  5740. * to make sure we don't hit a controller bug, we _only_
  5741. * enable Buffer Read Ready interrupt here.
  5742. */
  5743. - ier = sdhci_readl(host, SDHCI_INT_ENABLE);
  5744. - sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
  5745. + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
  5746. + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
  5747. /*
  5748. * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
  5749. @@ -2044,7 +1994,8 @@
  5750. if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
  5751. err = 0;
  5752. - sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
  5753. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5754. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5755. spin_unlock_irqrestore(&host->lock, flags);
  5756. sdhci_runtime_pm_put(host);
  5757. @@ -2054,26 +2005,30 @@
  5758. static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
  5759. {
  5760. - u16 ctrl;
  5761. -
  5762. /* Host Controller v3.00 defines preset value registers */
  5763. if (host->version < SDHCI_SPEC_300)
  5764. return;
  5765. - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5766. -
  5767. /*
  5768. * We only enable or disable Preset Value if they are not already
  5769. * enabled or disabled respectively. Otherwise, we bail out.
  5770. */
  5771. - if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
  5772. - ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
  5773. - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5774. - host->flags |= SDHCI_PV_ENABLED;
  5775. - } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
  5776. - ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
  5777. + if (host->preset_enabled != enable) {
  5778. + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5779. +
  5780. + if (enable)
  5781. + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
  5782. + else
  5783. + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
  5784. +
  5785. sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5786. - host->flags &= ~SDHCI_PV_ENABLED;
  5787. +
  5788. + if (enable)
  5789. + host->flags |= SDHCI_PV_ENABLED;
  5790. + else
  5791. + host->flags &= ~SDHCI_PV_ENABLED;
  5792. +
  5793. + host->preset_enabled = enable;
  5794. }
  5795. }
  5796. @@ -2095,8 +2050,8 @@
  5797. pr_err("%s: Resetting controller.\n",
  5798. mmc_hostname(host->mmc));
  5799. - sdhci_reset(host, SDHCI_RESET_CMD);
  5800. - sdhci_reset(host, SDHCI_RESET_DATA);
  5801. + sdhci_do_reset(host, SDHCI_RESET_CMD);
  5802. + sdhci_do_reset(host, SDHCI_RESET_DATA);
  5803. host->mrq->cmd->error = -ENOMEDIUM;
  5804. tasklet_schedule(&host->finish_tasklet);
  5805. @@ -2124,15 +2079,6 @@
  5806. * *
  5807. \*****************************************************************************/
  5808. -static void sdhci_tasklet_card(unsigned long param)
  5809. -{
  5810. - struct sdhci_host *host = (struct sdhci_host*)param;
  5811. -
  5812. - sdhci_card_event(host->mmc);
  5813. -
  5814. - mmc_detect_change(host->mmc, msecs_to_jiffies(200));
  5815. -}
  5816. -
  5817. static void sdhci_tasklet_finish(unsigned long param)
  5818. {
  5819. struct sdhci_host *host;
  5820. @@ -2169,12 +2115,12 @@
  5821. /* Some controllers need this kick or reset won't work here */
  5822. if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
  5823. /* This is to force an update */
  5824. - sdhci_update_clock(host);
  5825. + host->ops->set_clock(host, host->clock);
  5826. /* Spec says we should do both at the same time, but Ricoh
  5827. controllers do not like that. */
  5828. - sdhci_reset(host, SDHCI_RESET_CMD);
  5829. - sdhci_reset(host, SDHCI_RESET_DATA);
  5830. + sdhci_do_reset(host, SDHCI_RESET_CMD);
  5831. + sdhci_do_reset(host, SDHCI_RESET_DATA);
  5832. }
  5833. host->mrq = NULL;
  5834. @@ -2424,101 +2370,94 @@
  5835. static irqreturn_t sdhci_irq(int irq, void *dev_id)
  5836. {
  5837. - irqreturn_t result;
  5838. + irqreturn_t result = IRQ_NONE;
  5839. struct sdhci_host *host = dev_id;
  5840. - u32 intmask, unexpected = 0;
  5841. - int cardint = 0, max_loops = 16;
  5842. + u32 intmask, mask, unexpected = 0;
  5843. + int max_loops = 16;
  5844. spin_lock(&host->lock);
  5845. - if (host->runtime_suspended) {
  5846. + if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
  5847. spin_unlock(&host->lock);
  5848. return IRQ_NONE;
  5849. }
  5850. intmask = sdhci_readl(host, SDHCI_INT_STATUS);
  5851. -
  5852. if (!intmask || intmask == 0xffffffff) {
  5853. result = IRQ_NONE;
  5854. goto out;
  5855. }
  5856. -again:
  5857. - DBG("*** %s got interrupt: 0x%08x\n",
  5858. - mmc_hostname(host->mmc), intmask);
  5859. -
  5860. - if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  5861. - u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5862. - SDHCI_CARD_PRESENT;
  5863. -
  5864. - /*
  5865. - * There is a observation on i.mx esdhc. INSERT bit will be
  5866. - * immediately set again when it gets cleared, if a card is
  5867. - * inserted. We have to mask the irq to prevent interrupt
  5868. - * storm which will freeze the system. And the REMOVE gets
  5869. - * the same situation.
  5870. - *
  5871. - * More testing are needed here to ensure it works for other
  5872. - * platforms though.
  5873. - */
  5874. - sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
  5875. - SDHCI_INT_CARD_REMOVE);
  5876. - sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
  5877. - SDHCI_INT_CARD_INSERT);
  5878. -
  5879. - sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
  5880. - SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
  5881. - intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
  5882. - tasklet_schedule(&host->card_tasklet);
  5883. - }
  5884. -
  5885. - if (intmask & SDHCI_INT_CMD_MASK) {
  5886. - sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
  5887. - SDHCI_INT_STATUS);
  5888. - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
  5889. - }
  5890. + do {
  5891. + /* Clear selected interrupts. */
  5892. + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
  5893. + SDHCI_INT_BUS_POWER);
  5894. + sdhci_writel(host, mask, SDHCI_INT_STATUS);
  5895. +
  5896. + DBG("*** %s got interrupt: 0x%08x\n",
  5897. + mmc_hostname(host->mmc), intmask);
  5898. +
  5899. + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  5900. + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5901. + SDHCI_CARD_PRESENT;
  5902. - if (intmask & SDHCI_INT_DATA_MASK) {
  5903. - sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
  5904. - SDHCI_INT_STATUS);
  5905. - sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
  5906. - }
  5907. + /*
  5908. + * There is an observation on i.MX esdhc: the INSERT
  5909. + * bit will be immediately set again when it gets
  5910. + * cleared, if a card is inserted. We have to mask
  5911. + * the irq to prevent an interrupt storm which will
  5912. + * freeze the system. And the REMOVE bit gets the
  5913. + * same treatment.
  5914. + *
  5915. + * More testing is needed here to ensure it works
  5916. + * for other platforms though.
  5917. + */
  5918. + host->ier &= ~(SDHCI_INT_CARD_INSERT |
  5919. + SDHCI_INT_CARD_REMOVE);
  5920. + host->ier |= present ? SDHCI_INT_CARD_REMOVE :
  5921. + SDHCI_INT_CARD_INSERT;
  5922. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5923. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5924. - intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
  5925. + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
  5926. + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
  5927. - intmask &= ~SDHCI_INT_ERROR;
  5928. + host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
  5929. + SDHCI_INT_CARD_REMOVE);
  5930. + result = IRQ_WAKE_THREAD;
  5931. + }
  5932. - if (intmask & SDHCI_INT_BUS_POWER) {
  5933. - pr_err("%s: Card is consuming too much power!\n",
  5934. - mmc_hostname(host->mmc));
  5935. - sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
  5936. - }
  5937. + if (intmask & SDHCI_INT_CMD_MASK)
  5938. + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
  5939. - intmask &= ~SDHCI_INT_BUS_POWER;
  5940. + if (intmask & SDHCI_INT_DATA_MASK)
  5941. + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
  5942. - if (intmask & SDHCI_INT_CARD_INT)
  5943. - cardint = 1;
  5944. + if (intmask & SDHCI_INT_BUS_POWER)
  5945. + pr_err("%s: Card is consuming too much power!\n",
  5946. + mmc_hostname(host->mmc));
  5947. - intmask &= ~SDHCI_INT_CARD_INT;
  5948. + if (intmask & SDHCI_INT_CARD_INT) {
  5949. + sdhci_enable_sdio_irq_nolock(host, false);
  5950. + host->thread_isr |= SDHCI_INT_CARD_INT;
  5951. + result = IRQ_WAKE_THREAD;
  5952. + }
  5953. - if (intmask) {
  5954. - unexpected |= intmask;
  5955. - sdhci_writel(host, intmask, SDHCI_INT_STATUS);
  5956. - }
  5957. + intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
  5958. + SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
  5959. + SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
  5960. + SDHCI_INT_CARD_INT);
  5961. - result = IRQ_HANDLED;
  5962. + if (intmask) {
  5963. + unexpected |= intmask;
  5964. + sdhci_writel(host, intmask, SDHCI_INT_STATUS);
  5965. + }
  5966. - intmask = sdhci_readl(host, SDHCI_INT_STATUS);
  5967. + if (result == IRQ_NONE)
  5968. + result = IRQ_HANDLED;
  5969. - /*
  5970. - * If we know we'll call the driver to signal SDIO IRQ, disregard
  5971. - * further indications of Card Interrupt in the status to avoid a
  5972. - * needless loop.
  5973. - */
  5974. - if (cardint)
  5975. - intmask &= ~SDHCI_INT_CARD_INT;
  5976. - if (intmask && --max_loops)
  5977. - goto again;
  5978. + intmask = sdhci_readl(host, SDHCI_INT_STATUS);
  5979. + } while (intmask && --max_loops);
  5980. out:
  5981. spin_unlock(&host->lock);
  5982. @@ -2527,15 +2466,38 @@
  5983. mmc_hostname(host->mmc), unexpected);
  5984. sdhci_dumpregs(host);
  5985. }
  5986. - /*
  5987. - * We have to delay this as it calls back into the driver.
  5988. - */
  5989. - if (cardint)
  5990. - mmc_signal_sdio_irq(host->mmc);
  5991. return result;
  5992. }
  5993. +static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
  5994. +{
  5995. + struct sdhci_host *host = dev_id;
  5996. + unsigned long flags;
  5997. + u32 isr;
  5998. +
  5999. + spin_lock_irqsave(&host->lock, flags);
  6000. + isr = host->thread_isr;
  6001. + host->thread_isr = 0;
  6002. + spin_unlock_irqrestore(&host->lock, flags);
  6003. +
  6004. + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  6005. + sdhci_card_event(host->mmc);
  6006. + mmc_detect_change(host->mmc, msecs_to_jiffies(200));
  6007. + }
  6008. +
  6009. + if (isr & SDHCI_INT_CARD_INT) {
  6010. + sdio_run_irqs(host->mmc);
  6011. +
  6012. + spin_lock_irqsave(&host->lock, flags);
  6013. + if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
  6014. + sdhci_enable_sdio_irq_nolock(host, true);
  6015. + spin_unlock_irqrestore(&host->lock, flags);
  6016. + }
  6017. +
  6018. + return isr ? IRQ_HANDLED : IRQ_NONE;
  6019. +}
  6020. +
  6021. /*****************************************************************************\
  6022. * *
  6023. * Suspend/resume *
  6024. @@ -2572,9 +2534,6 @@
  6025. int sdhci_suspend_host(struct sdhci_host *host)
  6026. {
  6027. - if (host->ops->platform_suspend)
  6028. - host->ops->platform_suspend(host);
  6029. -
  6030. sdhci_disable_card_detection(host);
  6031. /* Disable tuning since we are suspending */
  6032. @@ -2584,7 +2543,9 @@
  6033. }
  6034. if (!device_may_wakeup(mmc_dev(host->mmc))) {
  6035. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  6036. + host->ier = 0;
  6037. + sdhci_writel(host, 0, SDHCI_INT_ENABLE);
  6038. + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
  6039. free_irq(host->irq, host);
  6040. } else {
  6041. sdhci_enable_irq_wakeups(host);
  6042. @@ -2605,8 +2566,9 @@
  6043. }
  6044. if (!device_may_wakeup(mmc_dev(host->mmc))) {
  6045. - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
  6046. - mmc_hostname(host->mmc), host);
  6047. + ret = request_threaded_irq(host->irq, sdhci_irq,
  6048. + sdhci_thread_irq, IRQF_SHARED,
  6049. + mmc_hostname(host->mmc), host);
  6050. if (ret)
  6051. return ret;
  6052. } else {
  6053. @@ -2628,9 +2590,6 @@
  6054. sdhci_enable_card_detection(host);
  6055. - if (host->ops->platform_resume)
  6056. - host->ops->platform_resume(host);
  6057. -
  6058. /* Set the re-tuning expiration flag */
  6059. if (host->flags & SDHCI_USING_RETUNING_TIMER)
  6060. host->flags |= SDHCI_NEEDS_RETUNING;
  6061. @@ -2682,10 +2641,12 @@
  6062. }
  6063. spin_lock_irqsave(&host->lock, flags);
  6064. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  6065. + host->ier &= SDHCI_INT_CARD_INT;
  6066. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  6067. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  6068. spin_unlock_irqrestore(&host->lock, flags);
  6069. - synchronize_irq(host->irq);
  6070. + synchronize_hardirq(host->irq);
  6071. spin_lock_irqsave(&host->lock, flags);
  6072. host->runtime_suspended = true;
  6073. @@ -2729,7 +2690,7 @@
  6074. host->runtime_suspended = false;
  6075. /* Enable SDIO IRQ */
  6076. - if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
  6077. + if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
  6078. sdhci_enable_sdio_irq_nolock(host, true);
  6079. /* Enable Card Detection */
  6080. @@ -2788,7 +2749,7 @@
  6081. if (debug_quirks2)
  6082. host->quirks2 = debug_quirks2;
  6083. - sdhci_reset(host, SDHCI_RESET_ALL);
  6084. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  6085. host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
  6086. host->version = (host->version & SDHCI_SPEC_VER_MASK)
  6087. @@ -2848,15 +2809,29 @@
  6088. * (128) and potentially one alignment transfer for
  6089. * each of those entries.
  6090. */
  6091. - host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
  6092. + host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
  6093. + ADMA_SIZE, &host->adma_addr,
  6094. + GFP_KERNEL);
  6095. host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
  6096. if (!host->adma_desc || !host->align_buffer) {
  6097. - kfree(host->adma_desc);
  6098. + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
  6099. + host->adma_desc, host->adma_addr);
  6100. kfree(host->align_buffer);
  6101. pr_warning("%s: Unable to allocate ADMA "
  6102. "buffers. Falling back to standard DMA.\n",
  6103. mmc_hostname(mmc));
  6104. host->flags &= ~SDHCI_USE_ADMA;
  6105. + host->adma_desc = NULL;
  6106. + host->align_buffer = NULL;
  6107. + } else if (host->adma_addr & 3) {
  6108. + pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
  6109. + mmc_hostname(mmc));
  6110. + host->flags &= ~SDHCI_USE_ADMA;
  6111. + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
  6112. + host->adma_desc, host->adma_addr);
  6113. + kfree(host->align_buffer);
  6114. + host->adma_desc = NULL;
  6115. + host->align_buffer = NULL;
  6116. }
  6117. }
  6118. @@ -2941,6 +2916,7 @@
  6119. mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;
  6120. mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
  6121. + mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
  6122. if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
  6123. host->flags |= SDHCI_AUTO_CMD12;
  6124. @@ -3212,8 +3188,6 @@
  6125. /*
  6126. * Init tasklets.
  6127. */
  6128. - tasklet_init(&host->card_tasklet,
  6129. - sdhci_tasklet_card, (unsigned long)host);
  6130. tasklet_init(&host->finish_tasklet,
  6131. sdhci_tasklet_finish, (unsigned long)host);
  6132. @@ -3230,8 +3204,8 @@
  6133. sdhci_init(host, 0);
  6134. - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
  6135. - mmc_hostname(mmc), host);
  6136. + ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
  6137. + IRQF_SHARED, mmc_hostname(mmc), host);
  6138. if (ret) {
  6139. pr_err("%s: Failed to request IRQ %d: %d\n",
  6140. mmc_hostname(mmc), host->irq, ret);
  6141. @@ -3273,12 +3247,12 @@
  6142. #ifdef SDHCI_USE_LEDS_CLASS
  6143. reset:
  6144. - sdhci_reset(host, SDHCI_RESET_ALL);
  6145. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  6146. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  6147. + sdhci_writel(host, 0, SDHCI_INT_ENABLE);
  6148. + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
  6149. free_irq(host->irq, host);
  6150. #endif
  6151. untasklet:
  6152. - tasklet_kill(&host->card_tasklet);
  6153. tasklet_kill(&host->finish_tasklet);
  6154. return ret;
  6155. @@ -3315,14 +3289,14 @@
  6156. #endif
  6157. if (!dead)
  6158. - sdhci_reset(host, SDHCI_RESET_ALL);
  6159. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  6160. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  6161. + sdhci_writel(host, 0, SDHCI_INT_ENABLE);
  6162. + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
  6163. free_irq(host->irq, host);
  6164. del_timer_sync(&host->timer);
  6165. - tasklet_kill(&host->card_tasklet);
  6166. tasklet_kill(&host->finish_tasklet);
  6167. if (host->vmmc) {
  6168. @@ -3335,7 +3309,9 @@
  6169. regulator_put(host->vqmmc);
  6170. }
  6171. - kfree(host->adma_desc);
  6172. + if (host->adma_desc)
  6173. + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
  6174. + host->adma_desc, host->adma_addr);
  6175. kfree(host->align_buffer);
  6176. host->adma_desc = NULL;
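
For reference, the sdhci.c interrupt rework above follows the usual hard-IRQ / threaded-IRQ split: the hard handler acknowledges the status it read, stashes the slow bits in host->thread_isr and returns IRQ_WAKE_THREAD, and the thread handler does the sleepable work (card detect, SDIO dispatch). A minimal sketch of that pattern is below; the foo_* names and the 0x30 status offset are illustrative only, not the SDHCI register layout.

	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/bitops.h>
	#include <linux/spinlock.h>

	struct foo_host {
		spinlock_t lock;
		u32 thread_isr;			/* bits deferred to the thread */
		void __iomem *ioaddr;
	};

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;
		irqreturn_t ret = IRQ_HANDLED;
		u32 status;

		spin_lock(&host->lock);
		status = readl(host->ioaddr + 0x30);
		if (!status) {
			spin_unlock(&host->lock);
			return IRQ_NONE;	/* not ours (shared line) */
		}
		writel(status, host->ioaddr + 0x30);	/* ack what we just read */
		if (status & BIT(8)) {			/* slow event: wake the thread */
			host->thread_isr |= status & BIT(8);
			ret = IRQ_WAKE_THREAD;
		}
		spin_unlock(&host->lock);

		return ret;
	}

	static irqreturn_t foo_threadirq(int irq, void *dev_id)
	{
		struct foo_host *host = dev_id;
		unsigned long flags;
		u32 isr;

		spin_lock_irqsave(&host->lock, flags);
		isr = host->thread_isr;
		host->thread_isr = 0;
		spin_unlock_irqrestore(&host->lock, flags);

		/* sleepable work (card detect, SDIO dispatch) goes here */

		return isr ? IRQ_HANDLED : IRQ_NONE;
	}

	/* registered as in the patch:
	 *	request_threaded_irq(irq, foo_hardirq, foo_threadirq,
	 *			     IRQF_SHARED, "foo", host);
	 */
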
  6177. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-cns3xxx.c linux-3.15-rc1/drivers/mmc/host/sdhci-cns3xxx.c
  6178. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-cns3xxx.c 2014-04-13 23:18:35.000000000 +0200
  6179. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-cns3xxx.c 2014-04-25 14:11:13.539375164 +0200
  6180. @@ -30,13 +30,12 @@
  6181. u16 clk;
  6182. unsigned long timeout;
  6183. - if (clock == host->clock)
  6184. - return;
  6185. + host->mmc->actual_clock = 0;
  6186. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  6187. if (clock == 0)
  6188. - goto out;
  6189. + return;
  6190. while (host->max_clk / div > clock) {
  6191. /*
  6192. @@ -75,13 +74,14 @@
  6193. clk |= SDHCI_CLOCK_CARD_EN;
  6194. sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  6195. -out:
  6196. - host->clock = clock;
  6197. }
  6198. static const struct sdhci_ops sdhci_cns3xxx_ops = {
  6199. .get_max_clock = sdhci_cns3xxx_get_max_clk,
  6200. .set_clock = sdhci_cns3xxx_set_clock,
  6201. + .set_bus_width = sdhci_set_bus_width,
  6202. + .reset = sdhci_reset,
  6203. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6204. };
  6205. static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
  6206. @@ -90,8 +90,7 @@
  6207. SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
  6208. SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
  6209. SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
  6210. - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
  6211. - SDHCI_QUIRK_NONSTANDARD_CLOCK,
  6212. + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
  6213. };
  6214. static int sdhci_cns3xxx_probe(struct platform_device *pdev)
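
The cns3xxx change above is the first of several identical conversions: with SDHCI_QUIRK_NONSTANDARD_CLOCK gone, every host driver now has to fill in the four mandatory ops, normally by pointing them at the helpers sdhci.c exports (see the sdhci.h hunk further down). A minimal sketch of such an ops table, with a hypothetical foo_ prefix; a driver replaces only the hooks it actually needs to override, as cns3xxx does with .set_clock.

	#include "sdhci.h"	/* assumes a host driver built next to sdhci.c */

	static const struct sdhci_ops foo_sdhci_ops = {
		/* everything points at the library implementations unless
		 * the controller needs its own hook */
		.set_clock		= sdhci_set_clock,
		.set_bus_width		= sdhci_set_bus_width,
		.reset			= sdhci_reset,
		.set_uhs_signaling	= sdhci_set_uhs_signaling,
	};
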
  6215. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-dove.c linux-3.15-rc1/drivers/mmc/host/sdhci-dove.c
  6216. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-dove.c 2014-04-13 23:18:35.000000000 +0200
  6217. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-dove.c 2014-04-25 14:11:13.539375164 +0200
  6218. @@ -86,6 +86,10 @@
  6219. static const struct sdhci_ops sdhci_dove_ops = {
  6220. .read_w = sdhci_dove_readw,
  6221. .read_l = sdhci_dove_readl,
  6222. + .set_clock = sdhci_set_clock,
  6223. + .set_bus_width = sdhci_set_bus_width,
  6224. + .reset = sdhci_reset,
  6225. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6226. };
  6227. static const struct sdhci_pltfm_data sdhci_dove_pdata = {
  6228. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-esdhc.h linux-3.15-rc1/drivers/mmc/host/sdhci-esdhc.h
  6229. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-esdhc.h 2014-04-13 23:18:35.000000000 +0200
  6230. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-esdhc.h 2014-04-25 14:11:13.539375164 +0200
  6231. @@ -20,10 +20,8 @@
  6232. #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
  6233. SDHCI_QUIRK_NO_BUSY_IRQ | \
  6234. - SDHCI_QUIRK_NONSTANDARD_CLOCK | \
  6235. SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
  6236. - SDHCI_QUIRK_PIO_NEEDS_DELAY | \
  6237. - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
  6238. + SDHCI_QUIRK_PIO_NEEDS_DELAY)
  6239. #define ESDHC_SYSTEM_CONTROL 0x2c
  6240. #define ESDHC_CLOCK_MASK 0x0000fff0
  6241. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-esdhc-imx.c linux-3.15-rc1/drivers/mmc/host/sdhci-esdhc-imx.c
  6242. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-esdhc-imx.c 2014-04-13 23:18:35.000000000 +0200
  6243. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-esdhc-imx.c 2014-04-25 14:11:13.539375164 +0200
  6244. @@ -160,7 +160,6 @@
  6245. MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
  6246. WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
  6247. } multiblock_status;
  6248. - u32 uhs_mode;
  6249. u32 is_ddr;
  6250. };
  6251. @@ -382,7 +381,6 @@
  6252. if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
  6253. ret |= SDHCI_CTRL_TUNED_CLK;
  6254. - ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
  6255. ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
  6256. return ret;
  6257. @@ -429,7 +427,6 @@
  6258. else
  6259. new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
  6260. writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
  6261. - imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
  6262. if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
  6263. new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
  6264. if (val & SDHCI_CTRL_TUNED_CLK)
  6265. @@ -600,12 +597,14 @@
  6266. u32 temp, val;
  6267. if (clock == 0) {
  6268. + host->mmc->actual_clock = 0;
  6269. +
  6270. if (esdhc_is_usdhc(imx_data)) {
  6271. val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
  6272. writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
  6273. host->ioaddr + ESDHC_VENDOR_SPEC);
  6274. }
  6275. - goto out;
  6276. + return;
  6277. }
  6278. if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
  6279. @@ -645,8 +644,6 @@
  6280. }
  6281. mdelay(1);
  6282. -out:
  6283. - host->clock = clock;
  6284. }
  6285. static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
  6286. @@ -668,7 +665,7 @@
  6287. return -ENOSYS;
  6288. }
  6289. -static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
  6290. +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
  6291. {
  6292. u32 ctrl;
  6293. @@ -686,8 +683,6 @@
  6294. esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
  6295. SDHCI_HOST_CONTROL);
  6296. -
  6297. - return 0;
  6298. }
  6299. static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
  6300. @@ -697,6 +692,7 @@
  6301. /* FIXME: delay a bit for card to be ready for next tuning due to errors */
  6302. mdelay(1);
  6303. + /* This is balanced by the runtime put in sdhci_tasklet_finish */
  6304. pm_runtime_get_sync(host->mmc->parent);
  6305. reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
  6306. reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
  6307. @@ -713,13 +709,12 @@
  6308. complete(&mrq->completion);
  6309. }
  6310. -static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
  6311. +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
  6312. + struct scatterlist *sg)
  6313. {
  6314. struct mmc_command cmd = {0};
  6315. struct mmc_request mrq = {NULL};
  6316. struct mmc_data data = {0};
  6317. - struct scatterlist sg;
  6318. - char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
  6319. cmd.opcode = opcode;
  6320. cmd.arg = 0;
  6321. @@ -728,11 +723,9 @@
  6322. data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
  6323. data.blocks = 1;
  6324. data.flags = MMC_DATA_READ;
  6325. - data.sg = &sg;
  6326. + data.sg = sg;
  6327. data.sg_len = 1;
  6328. - sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
  6329. -
  6330. mrq.cmd = &cmd;
  6331. mrq.cmd->mrq = &mrq;
  6332. mrq.data = &data;
  6333. @@ -742,14 +735,12 @@
  6334. mrq.done = esdhc_request_done;
  6335. init_completion(&(mrq.completion));
  6336. - disable_irq(host->irq);
  6337. - spin_lock(&host->lock);
  6338. + spin_lock_irq(&host->lock);
  6339. host->mrq = &mrq;
  6340. sdhci_send_command(host, mrq.cmd);
  6341. - spin_unlock(&host->lock);
  6342. - enable_irq(host->irq);
  6343. + spin_unlock_irq(&host->lock);
  6344. wait_for_completion(&mrq.completion);
  6345. @@ -772,13 +763,21 @@
  6346. static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
  6347. {
  6348. + struct scatterlist sg;
  6349. + char *tuning_pattern;
  6350. int min, max, avg, ret;
  6351. + tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
  6352. + if (!tuning_pattern)
  6353. + return -ENOMEM;
  6354. +
  6355. + sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
  6356. +
  6357. /* find the mininum delay first which can pass tuning */
  6358. min = ESDHC_TUNE_CTRL_MIN;
  6359. while (min < ESDHC_TUNE_CTRL_MAX) {
  6360. esdhc_prepare_tuning(host, min);
  6361. - if (!esdhc_send_tuning_cmd(host, opcode))
  6362. + if (!esdhc_send_tuning_cmd(host, opcode, &sg))
  6363. break;
  6364. min += ESDHC_TUNE_CTRL_STEP;
  6365. }
  6366. @@ -787,7 +786,7 @@
  6367. max = min + ESDHC_TUNE_CTRL_STEP;
  6368. while (max < ESDHC_TUNE_CTRL_MAX) {
  6369. esdhc_prepare_tuning(host, max);
  6370. - if (esdhc_send_tuning_cmd(host, opcode)) {
  6371. + if (esdhc_send_tuning_cmd(host, opcode, &sg)) {
  6372. max -= ESDHC_TUNE_CTRL_STEP;
  6373. break;
  6374. }
  6375. @@ -797,9 +796,11 @@
  6376. /* use average delay to get the best timing */
  6377. avg = (min + max) / 2;
  6378. esdhc_prepare_tuning(host, avg);
  6379. - ret = esdhc_send_tuning_cmd(host, opcode);
  6380. + ret = esdhc_send_tuning_cmd(host, opcode, &sg);
  6381. esdhc_post_tuning(host);
  6382. + kfree(tuning_pattern);
  6383. +
  6384. dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
  6385. ret ? "failed" : "passed", avg, ret);
  6386. @@ -837,28 +838,20 @@
  6387. return pinctrl_select_state(imx_data->pinctrl, pinctrl);
  6388. }
  6389. -static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
  6390. +static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
  6391. {
  6392. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6393. struct pltfm_imx_data *imx_data = pltfm_host->priv;
  6394. struct esdhc_platform_data *boarddata = &imx_data->boarddata;
  6395. - switch (uhs) {
  6396. + switch (timing) {
  6397. case MMC_TIMING_UHS_SDR12:
  6398. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
  6399. - break;
  6400. case MMC_TIMING_UHS_SDR25:
  6401. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
  6402. - break;
  6403. case MMC_TIMING_UHS_SDR50:
  6404. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
  6405. - break;
  6406. case MMC_TIMING_UHS_SDR104:
  6407. case MMC_TIMING_MMC_HS200:
  6408. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
  6409. break;
  6410. case MMC_TIMING_UHS_DDR50:
  6411. - imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
  6412. writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
  6413. ESDHC_MIX_CTRL_DDREN,
  6414. host->ioaddr + ESDHC_MIX_CTRL);
  6415. @@ -875,7 +868,15 @@
  6416. break;
  6417. }
  6418. - return esdhc_change_pinstate(host, uhs);
  6419. + esdhc_change_pinstate(host, timing);
  6420. +}
  6421. +
  6422. +static void esdhc_reset(struct sdhci_host *host, u8 mask)
  6423. +{
  6424. + sdhci_reset(host, mask);
  6425. +
  6426. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  6427. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  6428. }
  6429. static struct sdhci_ops sdhci_esdhc_ops = {
  6430. @@ -888,8 +889,9 @@
  6431. .get_max_clock = esdhc_pltfm_get_max_clock,
  6432. .get_min_clock = esdhc_pltfm_get_min_clock,
  6433. .get_ro = esdhc_pltfm_get_ro,
  6434. - .platform_bus_width = esdhc_pltfm_bus_width,
  6435. + .set_bus_width = esdhc_pltfm_set_bus_width,
  6436. .set_uhs_signaling = esdhc_set_uhs_signaling,
  6437. + .reset = esdhc_reset,
  6438. };
  6439. static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
  6440. @@ -1170,8 +1172,10 @@
  6441. ret = sdhci_runtime_suspend_host(host);
  6442. - clk_disable_unprepare(imx_data->clk_per);
  6443. - clk_disable_unprepare(imx_data->clk_ipg);
  6444. + if (!sdhci_sdio_irq_enabled(host)) {
  6445. + clk_disable_unprepare(imx_data->clk_per);
  6446. + clk_disable_unprepare(imx_data->clk_ipg);
  6447. + }
  6448. clk_disable_unprepare(imx_data->clk_ahb);
  6449. return ret;
  6450. @@ -1183,8 +1187,10 @@
  6451. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6452. struct pltfm_imx_data *imx_data = pltfm_host->priv;
  6453. - clk_prepare_enable(imx_data->clk_per);
  6454. - clk_prepare_enable(imx_data->clk_ipg);
  6455. + if (!sdhci_sdio_irq_enabled(host)) {
  6456. + clk_prepare_enable(imx_data->clk_per);
  6457. + clk_prepare_enable(imx_data->clk_ipg);
  6458. + }
  6459. clk_prepare_enable(imx_data->clk_ahb);
  6460. return sdhci_runtime_resume_host(host);
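
The esdhc-imx hunks above also move the tuning pattern off the stack: the buffer is kmalloc()ed and the single-entry scatterlist is built once by the caller, because stack memory must not be handed to the DMA API. A condensed sketch of that arrangement; foo_run_one_tuning_step is hypothetical, while ESDHC_TUNING_BLOCK_PATTERN_LEN and the new esdhc_send_tuning_cmd() signature are the ones from the hunk above.

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int foo_run_one_tuning_step(struct sdhci_host *host, u32 opcode)
	{
		struct scatterlist sg;
		char *pattern;
		int ret;

		pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
		if (!pattern)
			return -ENOMEM;

		/* heap memory is DMA-safe; an on-stack buffer is not */
		sg_init_one(&sg, pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
		ret = esdhc_send_tuning_cmd(host, opcode, &sg);

		kfree(pattern);
		return ret;
	}
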
  6461. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci.h linux-3.15-rc1/drivers/mmc/host/sdhci.h
  6462. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci.h 2014-04-13 23:18:35.000000000 +0200
  6463. +++ linux-3.15-rc1/drivers/mmc/host/sdhci.h 2014-04-25 14:11:13.583375360 +0200
  6464. @@ -281,18 +281,14 @@
  6465. unsigned int (*get_max_clock)(struct sdhci_host *host);
  6466. unsigned int (*get_min_clock)(struct sdhci_host *host);
  6467. unsigned int (*get_timeout_clock)(struct sdhci_host *host);
  6468. - int (*platform_bus_width)(struct sdhci_host *host,
  6469. - int width);
  6470. + void (*set_bus_width)(struct sdhci_host *host, int width);
  6471. void (*platform_send_init_74_clocks)(struct sdhci_host *host,
  6472. u8 power_mode);
  6473. unsigned int (*get_ro)(struct sdhci_host *host);
  6474. - void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
  6475. - void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
  6476. + void (*reset)(struct sdhci_host *host, u8 mask);
  6477. int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
  6478. - int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
  6479. + void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
  6480. void (*hw_reset)(struct sdhci_host *host);
  6481. - void (*platform_suspend)(struct sdhci_host *host);
  6482. - void (*platform_resume)(struct sdhci_host *host);
  6483. void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
  6484. void (*platform_init)(struct sdhci_host *host);
  6485. void (*card_event)(struct sdhci_host *host);
  6486. @@ -397,6 +393,16 @@
  6487. extern void sdhci_send_command(struct sdhci_host *host,
  6488. struct mmc_command *cmd);
  6489. +static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
  6490. +{
  6491. + return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
  6492. +}
  6493. +
  6494. +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
  6495. +void sdhci_set_bus_width(struct sdhci_host *host, int width);
  6496. +void sdhci_reset(struct sdhci_host *host, u8 mask);
  6497. +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
  6498. +
  6499. #ifdef CONFIG_PM
  6500. extern int sdhci_suspend_host(struct sdhci_host *host);
  6501. extern int sdhci_resume_host(struct sdhci_host *host);
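
With the sdhci.h change above, the platform_reset_enter/platform_reset_exit pair collapses into a single .reset hook that is expected to call the exported sdhci_reset() itself, as the esdhc, pxav2/pxav3 and tegra hunks elsewhere in this patch do. A sketch of the wrapper shape, with a hypothetical foo_reset; drivers that only need the stock behaviour simply set .reset = sdhci_reset.

	static void foo_reset(struct sdhci_host *host, u8 mask)
	{
		sdhci_reset(host, mask);	/* library reset first */

		if (mask & SDHCI_RESET_ALL) {
			/* controller-specific registers are re-applied here,
			 * which is what the old platform_reset_exit() hook did */
		}
	}
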
  6502. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-of-arasan.c linux-3.15-rc1/drivers/mmc/host/sdhci-of-arasan.c
  6503. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-of-arasan.c 2014-04-13 23:18:35.000000000 +0200
  6504. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-of-arasan.c 2014-04-25 14:11:13.539375164 +0200
  6505. @@ -52,8 +52,12 @@
  6506. }
  6507. static struct sdhci_ops sdhci_arasan_ops = {
  6508. + .set_clock = sdhci_set_clock,
  6509. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  6510. .get_timeout_clock = sdhci_arasan_get_timeout_clock,
  6511. + .set_bus_width = sdhci_set_bus_width,
  6512. + .reset = sdhci_reset,
  6513. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6514. };
  6515. static struct sdhci_pltfm_data sdhci_arasan_pdata = {
  6516. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-of-esdhc.c linux-3.15-rc1/drivers/mmc/host/sdhci-of-esdhc.c
  6517. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-of-esdhc.c 2014-04-13 23:18:35.000000000 +0200
  6518. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-of-esdhc.c 2014-04-25 14:11:13.539375164 +0200
  6519. @@ -199,13 +199,14 @@
  6520. static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
  6521. {
  6522. -
  6523. int pre_div = 2;
  6524. int div = 1;
  6525. u32 temp;
  6526. + host->mmc->actual_clock = 0;
  6527. +
  6528. if (clock == 0)
  6529. - goto out;
  6530. + return;
  6531. /* Workaround to reduce the clock frequency for p1010 esdhc */
  6532. if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
  6533. @@ -238,24 +239,8 @@
  6534. | (pre_div << ESDHC_PREDIV_SHIFT));
  6535. sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
  6536. mdelay(1);
  6537. -out:
  6538. - host->clock = clock;
  6539. }
  6540. -#ifdef CONFIG_PM
  6541. -static u32 esdhc_proctl;
  6542. -static void esdhc_of_suspend(struct sdhci_host *host)
  6543. -{
  6544. - esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
  6545. -}
  6546. -
  6547. -static void esdhc_of_resume(struct sdhci_host *host)
  6548. -{
  6549. - esdhc_of_enable_dma(host);
  6550. - sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
  6551. -}
  6552. -#endif
  6553. -
  6554. static void esdhc_of_platform_init(struct sdhci_host *host)
  6555. {
  6556. u32 vvn;
  6557. @@ -269,7 +254,7 @@
  6558. host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
  6559. }
  6560. -static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
  6561. +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
  6562. {
  6563. u32 ctrl;
  6564. @@ -289,8 +274,6 @@
  6565. clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
  6566. ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
  6567. -
  6568. - return 0;
  6569. }
  6570. static const struct sdhci_ops sdhci_esdhc_ops = {
  6571. @@ -305,13 +288,45 @@
  6572. .get_max_clock = esdhc_of_get_max_clock,
  6573. .get_min_clock = esdhc_of_get_min_clock,
  6574. .platform_init = esdhc_of_platform_init,
  6575. -#ifdef CONFIG_PM
  6576. - .platform_suspend = esdhc_of_suspend,
  6577. - .platform_resume = esdhc_of_resume,
  6578. -#endif
  6579. .adma_workaround = esdhci_of_adma_workaround,
  6580. - .platform_bus_width = esdhc_pltfm_bus_width,
  6581. + .set_bus_width = esdhc_pltfm_set_bus_width,
  6582. + .reset = sdhci_reset,
  6583. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6584. +};
  6585. +
  6586. +#ifdef CONFIG_PM
  6587. +
  6588. +static u32 esdhc_proctl;
  6589. +static int esdhc_of_suspend(struct device *dev)
  6590. +{
  6591. + struct sdhci_host *host = dev_get_drvdata(dev);
  6592. +
  6593. + esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
  6594. +
  6595. + return sdhci_suspend_host(host);
  6596. +}
  6597. +
  6598. +static int esdhc_of_resume(struct device *dev)
  6599. +{
  6600. + struct sdhci_host *host = dev_get_drvdata(dev);
  6601. + int ret = sdhci_resume_host(host);
  6602. +
  6603. + if (ret == 0) {
  6604. + /* Isn't this already done by sdhci_resume_host() ? --rmk */
  6605. + esdhc_of_enable_dma(host);
  6606. + sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
  6607. + }
  6608. +
  6609. + return ret;
  6610. +}
  6611. +
  6612. +static const struct dev_pm_ops esdhc_pmops = {
  6613. + .suspend = esdhc_of_suspend,
  6614. + .resume = esdhc_of_resume,
  6615. };
  6616. +#define ESDHC_PMOPS (&esdhc_pmops)
  6617. +#else
  6618. +#define ESDHC_PMOPS NULL
  6619. +#endif
  6620. static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
  6621. /*
  6622. @@ -374,7 +390,7 @@
  6623. .name = "sdhci-esdhc",
  6624. .owner = THIS_MODULE,
  6625. .of_match_table = sdhci_esdhc_of_match,
  6626. - .pm = SDHCI_PLTFM_PMOPS,
  6627. + .pm = ESDHC_PMOPS,
  6628. },
  6629. .probe = sdhci_esdhc_probe,
  6630. .remove = sdhci_esdhc_remove,
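
Since the of-esdhc suspend/resume handling above is now an ordinary driver-level dev_pm_ops, the same wiring could also be expressed with the stock helper macro. A hedged alternative spelling (not what the patch uses; it assumes the suspend/resume functions are visible, or marked __maybe_unused, outside the #ifdef, since SET_SYSTEM_SLEEP_PM_OPS only fills the fields under CONFIG_PM_SLEEP):

	static SIMPLE_DEV_PM_OPS(esdhc_of_pm_ops, esdhc_of_suspend, esdhc_of_resume);
	/* and then in the driver struct: .driver.pm = &esdhc_of_pm_ops */
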
  6631. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-of-hlwd.c linux-3.15-rc1/drivers/mmc/host/sdhci-of-hlwd.c
  6632. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-of-hlwd.c 2014-04-13 23:18:35.000000000 +0200
  6633. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-of-hlwd.c 2014-04-25 14:11:13.539375164 +0200
  6634. @@ -58,6 +58,10 @@
  6635. .write_l = sdhci_hlwd_writel,
  6636. .write_w = sdhci_hlwd_writew,
  6637. .write_b = sdhci_hlwd_writeb,
  6638. + .set_clock = sdhci_set_clock,
  6639. + .set_bus_width = sdhci_set_bus_width,
  6640. + .reset = sdhci_reset,
  6641. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6642. };
  6643. static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
  6644. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pci.c linux-3.15-rc1/drivers/mmc/host/sdhci-pci.c
  6645. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pci.c 2014-04-13 23:18:35.000000000 +0200
  6646. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-pci.c 2014-04-25 14:11:13.539375164 +0200
  6647. @@ -1031,7 +1031,7 @@
  6648. return 0;
  6649. }
  6650. -static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
  6651. +static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width)
  6652. {
  6653. u8 ctrl;
  6654. @@ -1052,8 +1052,6 @@
  6655. }
  6656. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  6657. -
  6658. - return 0;
  6659. }
  6660. static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
  6661. @@ -1080,8 +1078,11 @@
  6662. }
  6663. static const struct sdhci_ops sdhci_pci_ops = {
  6664. + .set_clock = sdhci_set_clock,
  6665. .enable_dma = sdhci_pci_enable_dma,
  6666. - .platform_bus_width = sdhci_pci_bus_width,
  6667. + .set_bus_width = sdhci_pci_set_bus_width,
  6668. + .reset = sdhci_reset,
  6669. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6670. .hw_reset = sdhci_pci_hw_reset,
  6671. };
  6672. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pltfm.c linux-3.15-rc1/drivers/mmc/host/sdhci-pltfm.c
  6673. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pltfm.c 2014-04-13 23:18:35.000000000 +0200
  6674. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-pltfm.c 2014-04-25 14:11:13.539375164 +0200
  6675. @@ -45,6 +45,10 @@
  6676. EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
  6677. static const struct sdhci_ops sdhci_pltfm_ops = {
  6678. + .set_clock = sdhci_set_clock,
  6679. + .set_bus_width = sdhci_set_bus_width,
  6680. + .reset = sdhci_reset,
  6681. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6682. };
  6683. #ifdef CONFIG_OF
  6684. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pxav2.c linux-3.15-rc1/drivers/mmc/host/sdhci-pxav2.c
  6685. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pxav2.c 2014-04-13 23:18:35.000000000 +0200
  6686. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-pxav2.c 2014-04-25 14:11:13.539375164 +0200
  6687. @@ -51,11 +51,13 @@
  6688. #define MMC_CARD 0x1000
  6689. #define MMC_WIDTH 0x0100
  6690. -static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
  6691. +static void pxav2_reset(struct sdhci_host *host, u8 mask)
  6692. {
  6693. struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
  6694. struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
  6695. + sdhci_reset(host, mask);
  6696. +
  6697. if (mask == SDHCI_RESET_ALL) {
  6698. u16 tmp = 0;
  6699. @@ -88,7 +90,7 @@
  6700. }
  6701. }
  6702. -static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
  6703. +static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
  6704. {
  6705. u8 ctrl;
  6706. u16 tmp;
  6707. @@ -107,14 +109,14 @@
  6708. }
  6709. writew(tmp, host->ioaddr + SD_CE_ATA_2);
  6710. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  6711. -
  6712. - return 0;
  6713. }
  6714. static const struct sdhci_ops pxav2_sdhci_ops = {
  6715. + .set_clock = sdhci_set_clock,
  6716. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  6717. - .platform_reset_exit = pxav2_set_private_registers,
  6718. - .platform_bus_width = pxav2_mmc_set_width,
  6719. + .set_bus_width = pxav2_mmc_set_bus_width,
  6720. + .reset = pxav2_reset,
  6721. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6722. };
  6723. #ifdef CONFIG_OF
  6724. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pxav3.c linux-3.15-rc1/drivers/mmc/host/sdhci-pxav3.c
  6725. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-pxav3.c 2014-04-13 23:18:35.000000000 +0200
  6726. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-pxav3.c 2014-04-25 14:11:13.539375164 +0200
  6727. @@ -112,11 +112,13 @@
  6728. return 0;
  6729. }
  6730. -static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
  6731. +static void pxav3_reset(struct sdhci_host *host, u8 mask)
  6732. {
  6733. struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
  6734. struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
  6735. + sdhci_reset(host, mask);
  6736. +
  6737. if (mask == SDHCI_RESET_ALL) {
  6738. /*
  6739. * tune timing of read data/command when crc error happen
  6740. @@ -184,7 +186,7 @@
  6741. pxa->power_mode = power_mode;
  6742. }
  6743. -static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
  6744. +static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
  6745. {
  6746. u16 ctrl_2;
  6747. @@ -218,15 +220,16 @@
  6748. dev_dbg(mmc_dev(host->mmc),
  6749. "%s uhs = %d, ctrl_2 = %04X\n",
  6750. __func__, uhs, ctrl_2);
  6751. -
  6752. - return 0;
  6753. }
  6754. static const struct sdhci_ops pxav3_sdhci_ops = {
  6755. - .platform_reset_exit = pxav3_set_private_registers,
  6756. + .set_clock = sdhci_set_clock,
  6757. .set_uhs_signaling = pxav3_set_uhs_signaling,
  6758. .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
  6759. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  6760. + .set_bus_width = sdhci_set_bus_width,
  6761. + .reset = pxav3_reset,
  6763. };
  6764. static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
  6765. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-s3c.c linux-3.15-rc1/drivers/mmc/host/sdhci-s3c.c
  6766. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-s3c.c 2014-04-13 23:18:35.000000000 +0200
  6767. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-s3c.c 2014-04-25 14:11:13.539375164 +0200
  6768. @@ -58,6 +58,8 @@
  6769. struct clk *clk_io;
  6770. struct clk *clk_bus[MAX_BUS_CLK];
  6771. unsigned long clk_rates[MAX_BUS_CLK];
  6772. +
  6773. + bool no_divider;
  6774. };
  6775. /**
  6776. @@ -70,6 +72,7 @@
  6777. */
  6778. struct sdhci_s3c_drv_data {
  6779. unsigned int sdhci_quirks;
  6780. + bool no_divider;
  6781. };
  6782. static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
  6783. @@ -119,7 +122,7 @@
  6784. * If controller uses a non-standard clock division, find the best clock
  6785. * speed possible with selected clock source and skip the division.
  6786. */
  6787. - if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
  6788. + if (ourhost->no_divider) {
  6789. rate = clk_round_rate(clksrc, wanted);
  6790. return wanted - rate;
  6791. }
  6792. @@ -161,9 +164,13 @@
  6793. int src;
  6794. u32 ctrl;
  6795. + host->mmc->actual_clock = 0;
  6796. +
  6797. /* don't bother if the clock is going off. */
  6798. - if (clock == 0)
  6799. + if (clock == 0) {
  6800. + sdhci_set_clock(host, clock);
  6801. return;
  6802. + }
  6803. for (src = 0; src < MAX_BUS_CLK; src++) {
  6804. delta = sdhci_s3c_consider_clock(ourhost, src, clock);
  6805. @@ -215,6 +222,8 @@
  6806. if (clock < 25 * 1000000)
  6807. ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
  6808. writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
  6809. +
  6810. + sdhci_set_clock(host, clock);
  6811. }
  6812. /**
  6813. @@ -295,10 +304,11 @@
  6814. unsigned long timeout;
  6815. u16 clk = 0;
  6816. + host->mmc->actual_clock = 0;
  6817. +
  6818. /* If the clock is going off, set to 0 at clock control register */
  6819. if (clock == 0) {
  6820. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  6821. - host->clock = clock;
  6822. return;
  6823. }
  6824. @@ -306,8 +316,6 @@
  6825. clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
  6826. - host->clock = clock;
  6827. -
  6828. clk = SDHCI_CLOCK_INT_EN;
  6829. sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  6830. @@ -329,14 +337,14 @@
  6831. }
  6832. /**
  6833. - * sdhci_s3c_platform_bus_width - support 8bit buswidth
  6834. + * sdhci_s3c_set_bus_width - support 8bit buswidth
  6835. * @host: The SDHCI host being queried
  6836. * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
  6837. *
  6838. * We have 8-bit width support but is not a v3 controller.
  6839. * So we add platform_bus_width() and support 8bit width.
  6840. */
  6841. -static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
  6842. +static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width)
  6843. {
  6844. u8 ctrl;
  6845. @@ -358,15 +366,15 @@
  6846. }
  6847. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  6848. -
  6849. - return 0;
  6850. }
  6851. static struct sdhci_ops sdhci_s3c_ops = {
  6852. .get_max_clock = sdhci_s3c_get_max_clk,
  6853. .set_clock = sdhci_s3c_set_clock,
  6854. .get_min_clock = sdhci_s3c_get_min_clock,
  6855. - .platform_bus_width = sdhci_s3c_platform_bus_width,
  6856. + .set_bus_width = sdhci_s3c_set_bus_width,
  6857. + .reset = sdhci_reset,
  6858. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6859. };
  6860. static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
  6861. @@ -606,8 +614,10 @@
  6862. /* Setup quirks for the controller */
  6863. host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
  6864. host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
  6865. - if (drv_data)
  6866. + if (drv_data) {
  6867. host->quirks |= drv_data->sdhci_quirks;
  6868. + sc->no_divider = drv_data->no_divider;
  6869. + }
  6870. #ifndef CONFIG_MMC_SDHCI_S3C_DMA
  6871. @@ -656,7 +666,7 @@
  6872. * If controller does not have internal clock divider,
  6873. * we can use overriding functions instead of default.
  6874. */
  6875. - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
  6876. + if (sc->no_divider) {
  6877. sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
  6878. sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
  6879. sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
  6880. @@ -797,7 +807,7 @@
  6881. #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
  6882. static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
  6883. - .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
  6884. + .no_divider = true,
  6885. };
  6886. #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
  6887. #else
  6888. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-sirf.c linux-3.15-rc1/drivers/mmc/host/sdhci-sirf.c
  6889. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-sirf.c 2014-04-13 23:18:35.000000000 +0200
  6890. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-sirf.c 2014-04-25 14:11:13.539375164 +0200
  6891. @@ -28,7 +28,11 @@
  6892. }
  6893. static struct sdhci_ops sdhci_sirf_ops = {
  6894. + .set_clock = sdhci_set_clock,
  6895. .get_max_clock = sdhci_sirf_get_max_clk,
  6896. + .set_bus_width = sdhci_set_bus_width,
  6897. + .reset = sdhci_reset,
  6898. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6899. };
  6900. static struct sdhci_pltfm_data sdhci_sirf_pdata = {
  6901. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-spear.c linux-3.15-rc1/drivers/mmc/host/sdhci-spear.c
  6902. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-spear.c 2014-04-13 23:18:35.000000000 +0200
  6903. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-spear.c 2014-04-25 14:11:13.539375164 +0200
  6904. @@ -38,7 +38,10 @@
  6905. /* sdhci ops */
  6906. static const struct sdhci_ops sdhci_pltfm_ops = {
  6907. - /* Nothing to do for now. */
  6908. + .set_clock = sdhci_set_clock,
  6909. + .set_bus_width = sdhci_set_bus_width,
  6910. + .reset = sdhci_reset,
  6911. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6912. };
  6913. #ifdef CONFIG_OF
  6914. diff -Nur linux-3.15-rc1.orig/drivers/mmc/host/sdhci-tegra.c linux-3.15-rc1/drivers/mmc/host/sdhci-tegra.c
  6915. --- linux-3.15-rc1.orig/drivers/mmc/host/sdhci-tegra.c 2014-04-13 23:18:35.000000000 +0200
  6916. +++ linux-3.15-rc1/drivers/mmc/host/sdhci-tegra.c 2014-04-25 14:11:13.539375164 +0200
  6917. @@ -48,19 +48,6 @@
  6918. int power_gpio;
  6919. };
  6920. -static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
  6921. -{
  6922. - u32 val;
  6923. -
  6924. - if (unlikely(reg == SDHCI_PRESENT_STATE)) {
  6925. - /* Use wp_gpio here instead? */
  6926. - val = readl(host->ioaddr + reg);
  6927. - return val | SDHCI_WRITE_PROTECT;
  6928. - }
  6929. -
  6930. - return readl(host->ioaddr + reg);
  6931. -}
  6932. -
  6933. static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
  6934. {
  6935. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6936. @@ -108,12 +95,14 @@
  6937. return mmc_gpio_get_ro(host->mmc);
  6938. }
  6939. -static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
  6940. +static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
  6941. {
  6942. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6943. struct sdhci_tegra *tegra_host = pltfm_host->priv;
  6944. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  6945. + sdhci_reset(host, mask);
  6946. +
  6947. if (!(mask & SDHCI_RESET_ALL))
  6948. return;
  6949. @@ -127,7 +116,7 @@
  6950. }
  6951. }
  6952. -static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
  6953. +static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
  6954. {
  6955. u32 ctrl;
  6956. @@ -144,16 +133,16 @@
  6957. ctrl &= ~SDHCI_CTRL_4BITBUS;
  6958. }
  6959. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  6960. - return 0;
  6961. }
  6962. static const struct sdhci_ops tegra_sdhci_ops = {
  6963. .get_ro = tegra_sdhci_get_ro,
  6964. - .read_l = tegra_sdhci_readl,
  6965. .read_w = tegra_sdhci_readw,
  6966. .write_l = tegra_sdhci_writel,
  6967. - .platform_bus_width = tegra_sdhci_buswidth,
  6968. - .platform_reset_exit = tegra_sdhci_reset_exit,
  6969. + .set_clock = sdhci_set_clock,
  6970. + .set_bus_width = tegra_sdhci_set_bus_width,
  6971. + .reset = tegra_sdhci_reset,
  6972. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6973. };
  6974. static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
  6975. diff -Nur linux-3.15-rc1.orig/drivers/net/ethernet/freescale/fec.h linux-3.15-rc1/drivers/net/ethernet/freescale/fec.h
  6976. --- linux-3.15-rc1.orig/drivers/net/ethernet/freescale/fec.h 2014-04-13 23:18:35.000000000 +0200
  6977. +++ linux-3.15-rc1/drivers/net/ethernet/freescale/fec.h 2014-04-25 14:11:13.583375360 +0200
  6978. @@ -14,6 +14,7 @@
  6979. /****************************************************************************/
  6980. #include <linux/clocksource.h>
  6981. +#include <linux/mutex.h>
  6982. #include <linux/net_tstamp.h>
  6983. #include <linux/ptp_clock_kernel.h>
  6984. @@ -170,6 +171,11 @@
  6985. unsigned short res0[4];
  6986. };
  6987. +union bufdesc_u {
  6988. + struct bufdesc bd;
  6989. + struct bufdesc_ex ebd;
  6990. +};
  6991. +
  6992. /*
  6993. * The following definitions courtesy of commproc.h, which where
  6994. * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
  6995. @@ -202,6 +208,7 @@
  6996. #define BD_ENET_RX_OV ((ushort)0x0002)
  6997. #define BD_ENET_RX_CL ((ushort)0x0001)
  6998. #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
  6999. +#define BD_ENET_RX_ERROR ((ushort)0x003f)
  7000. /* Enhanced buffer descriptor control/status used by Ethernet receive */
  7001. #define BD_ENET_RX_VLAN 0x00000004
  7002. @@ -224,10 +231,17 @@
  7003. #define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
  7004. /*enhanced buffer descriptor control/status used by Ethernet transmit*/
  7005. -#define BD_ENET_TX_INT 0x40000000
  7006. -#define BD_ENET_TX_TS 0x20000000
  7007. -#define BD_ENET_TX_PINS 0x10000000
  7008. -#define BD_ENET_TX_IINS 0x08000000
  7009. +#define BD_ENET_TX_INT BIT(30)
  7010. +#define BD_ENET_TX_TS BIT(29)
  7011. +#define BD_ENET_TX_PINS BIT(28)
  7012. +#define BD_ENET_TX_IINS BIT(27)
  7013. +#define BD_ENET_TX_TXE BIT(15)
  7014. +#define BD_ENET_TX_UE BIT(13)
  7015. +#define BD_ENET_TX_EE BIT(12)
  7016. +#define BD_ENET_TX_FE BIT(11)
  7017. +#define BD_ENET_TX_LCE BIT(10)
  7018. +#define BD_ENET_TX_OE BIT(9)
  7019. +#define BD_ENET_TX_TSE BIT(8)
  7020. /* This device has up to three irqs on some platforms */
  7021. @@ -240,28 +254,20 @@
  7022. * the skbuffer directly.
  7023. */
  7024. -#define FEC_ENET_RX_PAGES 8
  7025. +#define FEC_ENET_RX_PAGES 64
  7026. #define FEC_ENET_RX_FRSIZE 2048
  7027. #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
  7028. #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
  7029. #define FEC_ENET_TX_FRSIZE 2048
  7030. #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
  7031. -#define TX_RING_SIZE 16 /* Must be power of two */
  7032. -#define TX_RING_MOD_MASK 15 /* for this to work */
  7033. +#define TX_RING_SIZE 128 /* Must be power of two */
  7034. #define BD_ENET_RX_INT 0x00800000
  7035. #define BD_ENET_RX_PTP ((ushort)0x0400)
  7036. #define BD_ENET_RX_ICE 0x00000020
  7037. #define BD_ENET_RX_PCR 0x00000010
  7038. -#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
  7039. #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
  7040. -struct fec_enet_delayed_work {
  7041. - struct delayed_work delay_work;
  7042. - bool timeout;
  7043. - bool trig_tx;
  7044. -};
  7045. -
  7046. /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
  7047. * tx_bd_base always point to the base of the buffer descriptors. The
  7048. * cur_rx and cur_tx point to the currently available buffer.
  7049. @@ -281,27 +287,33 @@
  7050. struct clk *clk_enet_out;
  7051. struct clk *clk_ptp;
  7052. + unsigned char tx_page_map[TX_RING_SIZE];
  7053. /* The saved address of a sent-in-place packet/buffer, for skfree(). */
  7054. unsigned char *tx_bounce[TX_RING_SIZE];
  7055. struct sk_buff *tx_skbuff[TX_RING_SIZE];
  7056. struct sk_buff *rx_skbuff[RX_RING_SIZE];
  7057. /* CPM dual port RAM relative addresses */
  7058. - dma_addr_t bd_dma;
  7059. + dma_addr_t rx_bd_dma;
  7060. + dma_addr_t tx_bd_dma;
  7061. /* Address of Rx and Tx buffers */
  7062. - struct bufdesc *rx_bd_base;
  7063. - struct bufdesc *tx_bd_base;
  7064. + union bufdesc_u *rx_bd_base;
  7065. + union bufdesc_u *tx_bd_base;
  7066. /* The next free ring entry */
  7067. - struct bufdesc *cur_rx, *cur_tx;
  7068. - /* The ring entries to be free()ed */
  7069. - struct bufdesc *dirty_tx;
  7070. + unsigned short tx_next;
  7071. + unsigned short tx_dirty;
  7072. + unsigned short tx_min;
  7073. + unsigned short rx_next;
  7074. unsigned short tx_ring_size;
  7075. unsigned short rx_ring_size;
  7076. + unsigned char flags;
  7077. +
  7078. + struct mutex mutex;
  7079. +
  7080. struct platform_device *pdev;
  7081. - int opened;
  7082. int dev_id;
  7083. /* Phylib and MDIO interface */
  7084. @@ -315,11 +327,12 @@
  7085. int speed;
  7086. struct completion mdio_done;
  7087. int irq[FEC_IRQ_NUM];
  7088. - int bufdesc_ex;
  7089. - int pause_flag;
  7090. + unsigned short pause_flag;
  7091. + unsigned short pause_mode;
  7092. struct napi_struct napi;
  7093. - int csum_flags;
  7094. +
  7095. + struct work_struct tx_timeout_work;
  7096. struct ptp_clock *ptp_clock;
  7097. struct ptp_clock_info ptp_caps;
  7098. @@ -333,8 +346,8 @@
  7099. int hwts_rx_en;
  7100. int hwts_tx_en;
  7101. struct timer_list time_keep;
  7102. - struct fec_enet_delayed_work delay_work;
  7103. struct regulator *reg_phy;
  7104. + unsigned long quirks;
  7105. };
  7106. void fec_ptp_init(struct platform_device *pdev);
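
The bufdesc_u union added above is what lets fec_main.c index both descriptor layouts with a single helper: an extended descriptor is twice the size of a plain one, so the stride must come from the member type, not from the union. A sketch of the rx-side equivalent of fec_enet_tx_get(); the name foo_rx_desc is hypothetical, the tx version appears in the fec_main.c hunk below.

	static union bufdesc_u *foo_rx_desc(struct fec_enet_private *fep,
					    unsigned int index)
	{
		union bufdesc_u *base = fep->rx_bd_base;

		/* step by &base->ebd / &base->bd so the pointer arithmetic
		 * uses the correct descriptor size */
		if (fep->flags & FEC_FLAG_BUFDESC_EX)
			return (union bufdesc_u *)(&base->ebd + index);

		return (union bufdesc_u *)(&base->bd + index);
	}
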
  7107. diff -Nur linux-3.15-rc1.orig/drivers/net/ethernet/freescale/fec_main.c linux-3.15-rc1/drivers/net/ethernet/freescale/fec_main.c
  7108. --- linux-3.15-rc1.orig/drivers/net/ethernet/freescale/fec_main.c 2014-04-13 23:18:35.000000000 +0200
  7109. +++ linux-3.15-rc1/drivers/net/ethernet/freescale/fec_main.c 2014-04-25 14:23:42.077994426 +0200
  7110. @@ -33,12 +33,6 @@
  7111. #include <linux/netdevice.h>
  7112. #include <linux/etherdevice.h>
  7113. #include <linux/skbuff.h>
  7114. -#include <linux/in.h>
  7115. -#include <linux/ip.h>
  7116. -#include <net/ip.h>
  7117. -#include <linux/tcp.h>
  7118. -#include <linux/udp.h>
  7119. -#include <linux/icmp.h>
  7120. #include <linux/spinlock.h>
  7121. #include <linux/workqueue.h>
  7122. #include <linux/bitops.h>
  7123. @@ -91,16 +85,8 @@
  7124. #define FEC_QUIRK_HAS_CSUM (1 << 5)
  7125. /* Controller has hardware vlan support */
  7126. #define FEC_QUIRK_HAS_VLAN (1 << 6)
  7127. -/* ENET IP errata ERR006358
  7128. - *
  7129. - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
  7130. - * detected as not set during a prior frame transmission, then the
  7131. - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
  7132. - * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
  7133. - * frames not being transmitted until there is a 0-to-1 transition on
  7134. - * ENET_TDAR[TDAR].
  7135. - */
  7136. -#define FEC_QUIRK_ERR006358 (1 << 7)
  7137. +/* Controller has ability to offset rx packets */
  7138. +#define FEC_QUIRK_RX_SHIFT16 (1 << 8)
  7139. static struct platform_device_id fec_devtype[] = {
  7140. {
  7141. @@ -120,7 +106,7 @@
  7142. .name = "imx6q-fec",
  7143. .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
  7144. FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
  7145. - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
  7146. + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_RX_SHIFT16,
  7147. }, {
  7148. .name = "mvf600-fec",
  7149. .driver_data = FEC_QUIRK_ENET_MAC,
  7150. @@ -172,9 +158,15 @@
  7151. #endif
  7152. #endif /* CONFIG_M5272 */
  7153. -#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
  7154. -#error "FEC: descriptor ring size constants too large"
  7155. +#if RX_RING_SIZE * 32 > PAGE_SIZE
  7156. +#error "FEC: receive descriptor ring size too large"
  7157. #endif
  7158. +#if TX_RING_SIZE * 32 > PAGE_SIZE
  7159. +#error "FEC: transmit descriptor ring size too large"
  7160. +#endif
  7161. +
  7162. +/* Minimum TX ring size when using NETIF_F_SG */
  7163. +#define TX_RING_SIZE_MIN_SG (2 * (MAX_SKB_FRAGS + 1))
  7164. /* Interrupt events/masks. */
  7165. #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
  7166. @@ -200,6 +192,7 @@
  7167. /* FEC receive acceleration */
  7168. #define FEC_RACC_IPDIS (1 << 1)
  7169. #define FEC_RACC_PRODIS (1 << 2)
  7170. +#define FEC_RACC_SHIFT16 BIT(7)
  7171. #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
  7172. /*
  7173. @@ -228,62 +221,60 @@
  7174. /* Transmitter timeout */
  7175. #define TX_TIMEOUT (2 * HZ)
  7176. -#define FEC_PAUSE_FLAG_AUTONEG 0x1
  7177. -#define FEC_PAUSE_FLAG_ENABLE 0x2
  7178. +/* pause mode/flag */
  7179. +#define FEC_PAUSE_FLAG_AUTONEG BIT(0)
  7180. +#define FEC_PAUSE_FLAG_RX BIT(1)
  7181. +#define FEC_PAUSE_FLAG_TX BIT(2)
  7182. +
  7183. +/* flags */
  7184. +#define FEC_FLAG_BUFDESC_EX BIT(0)
  7185. +#define FEC_FLAG_RX_CSUM BIT(1)
  7186. +#define FEC_FLAG_RX_VLAN BIT(2)
  7187. static int mii_cnt;
  7188. -static inline
  7189. -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
  7190. +static unsigned copybreak = 200;
  7191. +module_param(copybreak, uint, 0644);
  7192. +MODULE_PARM_DESC(copybreak,
  7193. + "Maximum size of packet that is copied to a new buffer on receive");
  7194. +
  7195. +static bool fec_enet_rx_zerocopy(struct fec_enet_private *fep, unsigned pktlen)
  7196. {
  7197. - struct bufdesc *new_bd = bdp + 1;
  7198. - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
  7199. - struct bufdesc_ex *ex_base;
  7200. - struct bufdesc *base;
  7201. - int ring_size;
  7202. -
  7203. - if (bdp >= fep->tx_bd_base) {
  7204. - base = fep->tx_bd_base;
  7205. - ring_size = fep->tx_ring_size;
  7206. - ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
  7207. - } else {
  7208. - base = fep->rx_bd_base;
  7209. - ring_size = fep->rx_ring_size;
  7210. - ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
  7211. - }
  7212. +#ifndef CONFIG_M5272
  7213. + if (fep->quirks & FEC_QUIRK_RX_SHIFT16 && pktlen >= copybreak)
  7214. + return true;
  7215. +#endif
  7216. + return false;
  7217. +}
  7218. - if (fep->bufdesc_ex)
  7219. - return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
  7220. - ex_base : ex_new_bd);
  7221. +static union bufdesc_u *
  7222. +fec_enet_tx_get(unsigned index, struct fec_enet_private *fep)
  7223. +{
  7224. + union bufdesc_u *base = fep->tx_bd_base;
  7225. + union bufdesc_u *bdp;
  7226. +
  7227. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7228. + bdp = (union bufdesc_u *)(&base->ebd + index);
  7229. else
  7230. - return (new_bd >= (base + ring_size)) ?
  7231. - base : new_bd;
  7232. + bdp = (union bufdesc_u *)(&base->bd + index);
  7233. +
  7234. + return bdp;
  7235. }
  7236. -static inline
  7237. -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
  7238. +static union bufdesc_u *
  7239. +fec_enet_rx_get(unsigned index, struct fec_enet_private *fep)
  7240. {
  7241. - struct bufdesc *new_bd = bdp - 1;
  7242. - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
  7243. - struct bufdesc_ex *ex_base;
  7244. - struct bufdesc *base;
  7245. - int ring_size;
  7246. -
  7247. - if (bdp >= fep->tx_bd_base) {
  7248. - base = fep->tx_bd_base;
  7249. - ring_size = fep->tx_ring_size;
  7250. - ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
  7251. - } else {
  7252. - base = fep->rx_bd_base;
  7253. - ring_size = fep->rx_ring_size;
  7254. - ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
  7255. - }
  7256. + union bufdesc_u *base = fep->rx_bd_base;
  7257. + union bufdesc_u *bdp;
  7258. +
  7259. + index &= fep->rx_ring_size - 1;
  7260. - if (fep->bufdesc_ex)
  7261. - return (struct bufdesc *)((ex_new_bd < ex_base) ?
  7262. - (ex_new_bd + ring_size) : ex_new_bd);
  7263. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7264. + bdp = (union bufdesc_u *)(&base->ebd + index);
  7265. else
  7266. - return (new_bd < base) ? (new_bd + ring_size) : new_bd;
  7267. + bdp = (union bufdesc_u *)(&base->bd + index);
  7268. +
  7269. + return bdp;
  7270. }
  7271. static void *swap_buffer(void *bufaddr, int len)
  7272. @@ -297,13 +288,47 @@
  7273. return bufaddr;
  7274. }
  7275. +static void fec_dump(struct net_device *ndev)
  7276. +{
  7277. + struct fec_enet_private *fep = netdev_priv(ndev);
  7278. + union bufdesc_u *bdp;
  7279. + unsigned index = 0;
  7280. +
  7281. + netdev_info(ndev, "TX ring dump\n");
  7282. + pr_info("Nr SC addr len SKB\n");
  7283. +
  7284. + for (index = 0; index < fep->tx_ring_size; index++) {
  7285. + bdp = fec_enet_tx_get(index, fep);
  7286. +
  7287. + pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p",
  7288. + index,
  7289. + index == fep->tx_next ? 'S' : ' ',
  7290. + index == fep->tx_dirty ? 'H' : ' ',
  7291. + bdp->bd.cbd_sc, bdp->bd.cbd_bufaddr,
  7292. + bdp->bd.cbd_datlen,
  7293. + fep->tx_skbuff[index]);
  7294. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7295. + pr_cont(" %08lx", bdp->ebd.cbd_esc);
  7296. + pr_cont("\n");
  7297. + }
  7298. +}
  7299. +
  7300. static int
  7301. fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
  7302. {
  7303. + int csum_start;
  7304. +
  7305. /* Only run for packets requiring a checksum. */
  7306. if (skb->ip_summed != CHECKSUM_PARTIAL)
  7307. return 0;
  7308. + csum_start = skb_checksum_start_offset(skb);
  7309. + if (csum_start + skb->csum_offset > skb_headlen(skb)) {
  7310. + netdev_err(ndev, "checksum outside skb head: headlen %u start %u offset %u\n",
  7311. + skb_headlen(skb), csum_start, skb->csum_offset);
  7312. + return -1;
  7313. + }
  7314. +
  7315. if (unlikely(skb_cow_head(skb, 0)))
  7316. return -1;
  7317. @@ -312,23 +337,56 @@
  7318. return 0;
  7319. }
  7320. +static void
  7321. +fec_enet_tx_unmap(unsigned index, union bufdesc_u *bdp, struct fec_enet_private *fep)
  7322. +{
  7323. + dma_addr_t addr = bdp->bd.cbd_bufaddr;
  7324. + unsigned length = bdp->bd.cbd_datlen;
  7325. +
  7326. + bdp->bd.cbd_bufaddr = 0;
  7327. +
  7328. + if (fep->tx_page_map[index])
  7329. + dma_unmap_page(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
  7330. + else
  7331. + dma_unmap_single(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
  7332. +}
  7333. +
  7334. +static void
  7335. +fec_enet_tx_unmap_range(unsigned index, unsigned last, struct fec_enet_private *fep)
  7336. +{
  7337. + union bufdesc_u *bdp;
  7338. +
  7339. + do {
  7340. + if (last == 0)
  7341. + last = fep->tx_ring_size;
  7342. + last--;
  7343. +
  7344. + bdp = fec_enet_tx_get(last, fep);
  7345. + fec_enet_tx_unmap(last, bdp, fep);
  7346. + } while (index != last);
  7347. +}
  7348. +
  7349. +static unsigned ring_free(unsigned ins, unsigned rem, unsigned size)
  7350. +{
  7351. + int num = rem - ins;
  7352. + return num < 0 ? num + size : num;
  7353. +}
  7354. +
  7355. static netdev_tx_t
  7356. fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  7357. {
  7358. struct fec_enet_private *fep = netdev_priv(ndev);
  7359. - const struct platform_device_id *id_entry =
  7360. - platform_get_device_id(fep->pdev);
  7361. - struct bufdesc *bdp, *bdp_pre;
  7362. + union bufdesc_u *bdp;
  7363. void *bufaddr;
  7364. unsigned short status;
  7365. - unsigned int index;
  7366. + unsigned index, last, length, cbd_esc;
  7367. + int f, nr_frags = skb_shinfo(skb)->nr_frags;
  7368. + dma_addr_t addr;
  7369. /* Fill in a Tx ring entry */
  7370. - bdp = fep->cur_tx;
  7371. -
  7372. - status = bdp->cbd_sc;
  7373. + index = fep->tx_next;
  7374. - if (status & BD_ENET_TX_READY) {
  7375. + if (ring_free(index, fep->tx_dirty, fep->tx_ring_size) < 1 + nr_frags) {
  7376. /* Ooops. All transmit buffers are full. Bail out.
  7377. * This should not happen, since ndev->tbusy should be set.
  7378. */
  7379. @@ -342,26 +400,17 @@
  7380. return NETDEV_TX_OK;
  7381. }
  7382. - /* Clear all of the status flags */
  7383. - status &= ~BD_ENET_TX_STATS;
  7384. -
  7385. /* Set buffer length and buffer pointer */
  7386. bufaddr = skb->data;
  7387. - bdp->cbd_datlen = skb->len;
  7388. + length = skb_headlen(skb);
  7389. /*
  7390. * On some FEC implementations data must be aligned on
  7391. * 4-byte boundaries. Use bounce buffers to copy data
  7392. * and get it aligned. Ugh.
  7393. */
  7394. - if (fep->bufdesc_ex)
  7395. - index = (struct bufdesc_ex *)bdp -
  7396. - (struct bufdesc_ex *)fep->tx_bd_base;
  7397. - else
  7398. - index = bdp - fep->tx_bd_base;
  7399. -
  7400. if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
  7401. - memcpy(fep->tx_bounce[index], skb->data, skb->len);
  7402. + memcpy(fep->tx_bounce[index], skb->data, length);
  7403. bufaddr = fep->tx_bounce[index];
  7404. }
  7405. @@ -370,75 +419,127 @@
  7406. * the system that it's running on. As the result, driver has to
  7407. * swap every frame going to and coming from the controller.
  7408. */
  7409. - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
  7410. - swap_buffer(bufaddr, skb->len);
  7411. + if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  7412. + swap_buffer(bufaddr, length);
  7413. - /* Save skb pointer */
  7414. - fep->tx_skbuff[index] = skb;
  7415. + /* Push the data cache so the CPM does not get stale memory data. */
  7416. + addr = dma_map_single(&fep->pdev->dev, bufaddr, length, DMA_TO_DEVICE);
  7417. + if (dma_mapping_error(&fep->pdev->dev, addr))
  7418. + goto release;
  7419. +
  7420. + bdp = fec_enet_tx_get(index, fep);
  7421. + bdp->bd.cbd_datlen = length;
  7422. + bdp->bd.cbd_bufaddr = addr;
  7423. - /* Push the data cache so the CPM does not get stale memory
  7424. - * data.
  7425. - */
  7426. - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
  7427. - skb->len, DMA_TO_DEVICE);
  7428. - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
  7429. - bdp->cbd_bufaddr = 0;
  7430. - fep->tx_skbuff[index] = NULL;
  7431. - dev_kfree_skb_any(skb);
  7432. - if (net_ratelimit())
  7433. - netdev_err(ndev, "Tx DMA memory map failed\n");
  7434. - return NETDEV_TX_OK;
  7435. - }
  7436. + fep->tx_page_map[index] = 0;
  7437. - if (fep->bufdesc_ex) {
  7438. -
  7439. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  7440. - ebdp->cbd_bdu = 0;
  7441. + cbd_esc = BD_ENET_TX_INT;
  7442. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  7443. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
  7444. fep->hwts_tx_en)) {
  7445. - ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
  7446. + cbd_esc |= BD_ENET_TX_TS;
  7447. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  7448. } else {
  7449. - ebdp->cbd_esc = BD_ENET_TX_INT;
  7450. -
  7451. /* Enable protocol checksum flags
  7452. * We do not bother with the IP Checksum bits as they
  7453. * are done by the kernel
  7454. */
  7455. if (skb->ip_summed == CHECKSUM_PARTIAL)
  7456. - ebdp->cbd_esc |= BD_ENET_TX_PINS;
  7457. + cbd_esc |= BD_ENET_TX_PINS;
  7458. + }
  7459. + bdp->ebd.cbd_bdu = 0;
  7460. + bdp->ebd.cbd_esc = cbd_esc;
  7461. + }
  7462. +
  7463. + for (last = index, f = 0; f < nr_frags; f++) {
  7464. + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
  7465. +
  7466. + if (++last >= fep->tx_ring_size)
  7467. + last = 0;
  7468. +
  7469. + length = skb_frag_size(frag);
  7470. +
  7471. + /* If the alignment is unsuitable, we need to bounce. */
  7472. + if (frag->page_offset & FEC_ALIGNMENT) {
  7473. + unsigned char *bounce = fep->tx_bounce[last];
  7474. +
  7475. + /* FIXME: highdma? */
  7476. + memcpy(bounce, skb_frag_address(frag), length);
  7477. +
  7478. + addr = dma_map_single(&fep->pdev->dev, bounce,
  7479. + length, DMA_TO_DEVICE);
  7480. + fep->tx_page_map[last] = 0;
  7481. + } else {
  7482. + addr = skb_frag_dma_map(&fep->pdev->dev, frag, 0,
  7483. + length, DMA_TO_DEVICE);
  7484. + fep->tx_page_map[last] = 1;
  7485. + }
  7486. +
  7487. + if (dma_mapping_error(&fep->pdev->dev, addr))
  7488. + goto release_frags;
  7489. +
  7490. + bdp = fec_enet_tx_get(last, fep);
  7491. + bdp->bd.cbd_datlen = length;
  7492. + bdp->bd.cbd_bufaddr = addr;
  7493. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  7494. + bdp->ebd.cbd_esc = cbd_esc;
  7495. + bdp->ebd.cbd_bdu = 0;
  7496. }
  7497. }
  7498. + /* Save skb pointer */
  7499. + fep->tx_skbuff[last] = skb;
  7500. +
  7501. + /*
  7502. + * We need the preceding stores to the descriptor to complete
  7503. + * before updating the status field, which hands it over to the
  7504. + * hardware. The corresponding rmb() is "in the hardware".
  7505. + */
  7506. + wmb();
  7507. +
  7508. /* Send it on its way. Tell FEC it's ready, interrupt when done,
  7509. * it's the last BD of the frame, and to put the CRC on the end.
  7510. */
  7511. - status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
  7512. - | BD_ENET_TX_LAST | BD_ENET_TX_TC);
  7513. - bdp->cbd_sc = status;
  7514. -
  7515. - bdp_pre = fec_enet_get_prevdesc(bdp, fep);
  7516. - if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
  7517. - !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
  7518. - fep->delay_work.trig_tx = true;
  7519. - schedule_delayed_work(&(fep->delay_work.delay_work),
  7520. - msecs_to_jiffies(1));
  7521. + status = bdp->bd.cbd_sc & BD_ENET_TX_WRAP;
  7522. + bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_INTR |
  7523. + BD_ENET_TX_LAST | BD_ENET_TX_TC;
  7524. +
  7525. + /* Now walk backwards setting the TX_READY on each fragment */
  7526. + for (f = nr_frags - 1; f >= 0; f--) {
  7527. + unsigned i = index + f;
  7528. +
  7529. + if (i >= fep->tx_ring_size)
  7530. + i -= fep->tx_ring_size;
  7531. +
  7532. + bdp = fec_enet_tx_get(i, fep);
  7533. + status = bdp->bd.cbd_sc & BD_ENET_TX_WRAP;
  7534. + bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_INTR;
  7535. }
  7536. - /* If this was the last BD in the ring, start at the beginning again. */
  7537. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7538. -
  7539. skb_tx_timestamp(skb);
  7540. + netdev_sent_queue(ndev, skb->len);
  7541. +
  7542. + if (++last >= fep->tx_ring_size)
  7543. + last = 0;
  7544. - fep->cur_tx = bdp;
  7545. + fep->tx_next = last;
  7546. - if (fep->cur_tx == fep->dirty_tx)
  7547. + if (ring_free(last, fep->tx_dirty, fep->tx_ring_size) < fep->tx_min)
  7548. netif_stop_queue(ndev);
  7549. /* Trigger transmission start */
  7550. - writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7551. + if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
  7552. + writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7553. return NETDEV_TX_OK;
  7554. +
  7555. + release_frags:
  7556. + fec_enet_tx_unmap_range(index, last, fep);
  7557. + release:
  7558. + dev_kfree_skb_any(skb);
  7559. + if (net_ratelimit())
  7560. + netdev_err(ndev, "Tx DMA memory map failed\n");
  7561. + return NETDEV_TX_OK;
  7562. }
  7563. /* Init RX & TX buffer descriptors
  7564. @@ -446,71 +547,60 @@
  7565. static void fec_enet_bd_init(struct net_device *dev)
  7566. {
  7567. struct fec_enet_private *fep = netdev_priv(dev);
  7568. - struct bufdesc *bdp;
  7569. + union bufdesc_u *bdp;
  7570. unsigned int i;
  7571. /* Initialize the receive buffer descriptors. */
  7572. - bdp = fep->rx_bd_base;
  7573. for (i = 0; i < fep->rx_ring_size; i++) {
  7574. + bdp = fec_enet_rx_get(i, fep);
  7575. /* Initialize the BD for every fragment in the page. */
  7576. - if (bdp->cbd_bufaddr)
  7577. - bdp->cbd_sc = BD_ENET_RX_EMPTY;
  7578. + if (bdp->bd.cbd_bufaddr)
  7579. + bdp->bd.cbd_sc = BD_ENET_RX_EMPTY;
  7580. else
  7581. - bdp->cbd_sc = 0;
  7582. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7583. - }
  7584. + bdp->bd.cbd_sc = 0;
  7585. - /* Set the last buffer to wrap */
  7586. - bdp = fec_enet_get_prevdesc(bdp, fep);
  7587. - bdp->cbd_sc |= BD_SC_WRAP;
  7588. + if (i == fep->rx_ring_size - 1)
  7589. + bdp->bd.cbd_sc |= BD_SC_WRAP;
  7590. + }
  7591. - fep->cur_rx = fep->rx_bd_base;
  7592. + fep->rx_next = 0;
  7593. /* ...and the same for transmit */
  7594. - bdp = fep->tx_bd_base;
  7595. - fep->cur_tx = bdp;
  7596. for (i = 0; i < fep->tx_ring_size; i++) {
  7597. + bdp = fec_enet_tx_get(i, fep);
  7598. /* Initialize the BD for every fragment in the page. */
  7599. - bdp->cbd_sc = 0;
  7600. - if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
  7601. + if (i == fep->tx_ring_size - 1)
  7602. + bdp->bd.cbd_sc = BD_SC_WRAP;
  7603. + else
  7604. + bdp->bd.cbd_sc = 0;
  7605. + if (bdp->bd.cbd_bufaddr)
  7606. + fec_enet_tx_unmap(i, bdp, fep);
  7607. + if (fep->tx_skbuff[i]) {
  7608. dev_kfree_skb_any(fep->tx_skbuff[i]);
  7609. fep->tx_skbuff[i] = NULL;
  7610. }
  7611. - bdp->cbd_bufaddr = 0;
  7612. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7613. }
  7614. - /* Set the last buffer to wrap */
  7615. - bdp = fec_enet_get_prevdesc(bdp, fep);
  7616. - bdp->cbd_sc |= BD_SC_WRAP;
  7617. - fep->dirty_tx = bdp;
  7618. + fep->tx_next = 0;
  7619. + fep->tx_dirty = fep->tx_ring_size - 1;
  7620. }
  7621. -/* This function is called to start or restart the FEC during a link
  7622. - * change. This only happens when switching between half and full
  7623. - * duplex.
  7624. +/*
  7625. + * This function is called to start or restart the FEC during a link
  7626. + * change, transmit timeout, or to reconfigure the FEC. The network
  7627. + * packet processing for this device must be stopped before this call.
  7628. */
  7629. static void
  7630. -fec_restart(struct net_device *ndev, int duplex)
  7631. +fec_restart(struct net_device *ndev)
  7632. {
  7633. struct fec_enet_private *fep = netdev_priv(ndev);
  7634. - const struct platform_device_id *id_entry =
  7635. - platform_get_device_id(fep->pdev);
  7636. - int i;
  7637. u32 val;
  7638. u32 temp_mac[2];
  7639. u32 rcntl = OPT_FRAME_SIZE | 0x04;
  7640. u32 ecntl = 0x2; /* ETHEREN */
  7641. - if (netif_running(ndev)) {
  7642. - netif_device_detach(ndev);
  7643. - napi_disable(&fep->napi);
  7644. - netif_stop_queue(ndev);
  7645. - netif_tx_lock_bh(ndev);
  7646. - }
  7647. -
  7648. /* Whack a reset. We should wait for this. */
  7649. writel(1, fep->hwp + FEC_ECNTRL);
  7650. udelay(10);
  7651. @@ -519,7 +609,7 @@
  7652. * enet-mac reset will reset mac address registers too,
  7653. * so need to reconfigure it.
  7654. */
  7655. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7656. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7657. memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
  7658. writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
  7659. writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
  7660. @@ -531,27 +621,16 @@
  7661. /* Set maximum receive buffer size. */
  7662. writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
  7663. - fec_enet_bd_init(ndev);
  7664. + if (fep->rx_bd_base)
  7665. + fec_enet_bd_init(ndev);
  7666. + netdev_reset_queue(ndev);
  7667. /* Set receive and transmit descriptor base. */
  7668. - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
  7669. - if (fep->bufdesc_ex)
  7670. - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
  7671. - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
  7672. - else
  7673. - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
  7674. - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
  7675. -
  7676. -
  7677. - for (i = 0; i <= TX_RING_MOD_MASK; i++) {
  7678. - if (fep->tx_skbuff[i]) {
  7679. - dev_kfree_skb_any(fep->tx_skbuff[i]);
  7680. - fep->tx_skbuff[i] = NULL;
  7681. - }
  7682. - }
  7683. + writel(fep->rx_bd_dma, fep->hwp + FEC_R_DES_START);
  7684. + writel(fep->tx_bd_dma, fep->hwp + FEC_X_DES_START);
  7685. /* Enable MII mode */
  7686. - if (duplex) {
  7687. + if (fep->full_duplex == DUPLEX_FULL) {
  7688. /* FD enable */
  7689. writel(0x04, fep->hwp + FEC_X_CNTRL);
  7690. } else {
  7691. @@ -560,15 +639,15 @@
  7692. writel(0x0, fep->hwp + FEC_X_CNTRL);
  7693. }
  7694. - fep->full_duplex = duplex;
  7695. -
  7696. /* Set MII speed */
  7697. writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
  7698. #if !defined(CONFIG_M5272)
  7699. /* set RX checksum */
  7700. val = readl(fep->hwp + FEC_RACC);
  7701. - if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
  7702. + if (fep->quirks & FEC_QUIRK_RX_SHIFT16)
  7703. + val |= FEC_RACC_SHIFT16;
  7704. + if (fep->flags & FEC_FLAG_RX_CSUM)
  7705. val |= FEC_RACC_OPTIONS;
  7706. else
  7707. val &= ~FEC_RACC_OPTIONS;
  7708. @@ -579,9 +658,9 @@
  7709. * The phy interface and speed need to get configured
  7710. * differently on enet-mac.
  7711. */
  7712. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7713. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7714. /* Enable flow control and length check */
  7715. - rcntl |= 0x40000000 | 0x00000020;
  7716. + rcntl |= 0x40000000;
  7717. /* RGMII, RMII or MII */
  7718. if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
  7719. @@ -602,7 +681,7 @@
  7720. }
  7721. } else {
  7722. #ifdef FEC_MIIGSK_ENR
  7723. - if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
  7724. + if (fep->quirks & FEC_QUIRK_USE_GASKET) {
  7725. u32 cfgr;
  7726. /* disable the gasket and wait */
  7727. writel(0, fep->hwp + FEC_MIIGSK_ENR);
  7728. @@ -627,22 +706,24 @@
  7729. }
  7730. #if !defined(CONFIG_M5272)
  7731. - /* enable pause frame*/
  7732. - if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
  7733. - ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
  7734. - fep->phy_dev && fep->phy_dev->pause)) {
  7735. - rcntl |= FEC_ENET_FCE;
  7736. -
  7737. - /* set FIFO threshold parameter to reduce overrun */
  7738. - writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
  7739. - writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
  7740. - writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
  7741. - writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
  7742. + if (fep->full_duplex == DUPLEX_FULL) {
  7743. + /*
  7744. + * Configure pause modes according to the current status.
  7745. + * Must only be enabled for full duplex links.
  7746. + */
  7747. + if (fep->pause_mode & FEC_PAUSE_FLAG_RX)
  7748. + rcntl |= FEC_ENET_FCE;
  7749. - /* OPD */
  7750. - writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
  7751. - } else {
  7752. - rcntl &= ~FEC_ENET_FCE;
  7753. + if (fep->pause_mode & FEC_PAUSE_FLAG_TX) {
  7754. + /* set FIFO threshold parameter to reduce overrun */
  7755. + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
  7756. + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
  7757. + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
  7758. + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
  7759. +
  7760. + /* OPD */
  7761. + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
  7762. + }
  7763. }
  7764. #endif /* !defined(CONFIG_M5272) */
  7765. @@ -655,14 +736,14 @@
  7766. writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
  7767. #endif
  7768. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7769. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7770. /* enable ENET endian swap */
  7771. ecntl |= (1 << 8);
  7772. /* enable ENET store and forward mode */
  7773. writel(1 << 8, fep->hwp + FEC_X_WMRK);
  7774. }
  7775. - if (fep->bufdesc_ex)
  7776. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7777. ecntl |= (1 << 4);
  7778. #ifndef CONFIG_M5272
  7779. @@ -674,26 +755,17 @@
  7780. writel(ecntl, fep->hwp + FEC_ECNTRL);
  7781. writel(0, fep->hwp + FEC_R_DES_ACTIVE);
  7782. - if (fep->bufdesc_ex)
  7783. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7784. fec_ptp_start_cyclecounter(ndev);
  7785. /* Enable interrupts we wish to service */
  7786. writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
  7787. -
  7788. - if (netif_running(ndev)) {
  7789. - netif_tx_unlock_bh(ndev);
  7790. - netif_wake_queue(ndev);
  7791. - napi_enable(&fep->napi);
  7792. - netif_device_attach(ndev);
  7793. - }
  7794. }
  7795. static void
  7796. fec_stop(struct net_device *ndev)
  7797. {
  7798. struct fec_enet_private *fep = netdev_priv(ndev);
  7799. - const struct platform_device_id *id_entry =
  7800. - platform_get_device_id(fep->pdev);
  7801. u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
  7802. /* We cannot expect a graceful transmit stop without link !!! */
  7803. @@ -711,7 +783,7 @@
  7804. writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
  7805. /* We have to keep ENET enabled to have MII interrupt stay working */
  7806. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7807. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7808. writel(2, fep->hwp + FEC_ECNTRL);
  7809. writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
  7810. }
  7811. @@ -723,127 +795,312 @@
  7812. {
  7813. struct fec_enet_private *fep = netdev_priv(ndev);
  7814. + fec_dump(ndev);
  7815. +
  7816. ndev->stats.tx_errors++;
  7817. - fep->delay_work.timeout = true;
  7818. - schedule_delayed_work(&(fep->delay_work.delay_work), 0);
  7819. + schedule_work(&fep->tx_timeout_work);
  7820. }
  7821. -static void fec_enet_work(struct work_struct *work)
  7822. +static void fec_enet_timeout_work(struct work_struct *work)
  7823. {
  7824. struct fec_enet_private *fep =
  7825. - container_of(work,
  7826. - struct fec_enet_private,
  7827. - delay_work.delay_work.work);
  7828. -
  7829. - if (fep->delay_work.timeout) {
  7830. - fep->delay_work.timeout = false;
  7831. - fec_restart(fep->netdev, fep->full_duplex);
  7832. - netif_wake_queue(fep->netdev);
  7833. - }
  7834. + container_of(work, struct fec_enet_private, tx_timeout_work);
  7835. + struct net_device *ndev = fep->netdev;
  7836. - if (fep->delay_work.trig_tx) {
  7837. - fep->delay_work.trig_tx = false;
  7838. - writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7839. + rtnl_lock();
  7840. + if (netif_device_present(ndev) || netif_running(ndev)) {
  7841. + mutex_lock(&fep->mutex);
  7842. + napi_disable(&fep->napi);
  7843. + netif_tx_lock_bh(ndev);
  7844. + fec_restart(ndev);
  7845. + netif_wake_queue(ndev);
  7846. + netif_tx_unlock_bh(ndev);
  7847. + napi_enable(&fep->napi);
  7848. + mutex_unlock(&fep->mutex);
  7849. }
  7850. + rtnl_unlock();
  7851. }
  7852. static void
  7853. +fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
  7854. + struct skb_shared_hwtstamps *hwtstamps)
  7855. +{
  7856. + unsigned long flags;
  7857. + u64 ns;
  7858. +
  7859. + spin_lock_irqsave(&fep->tmreg_lock, flags);
  7860. + ns = timecounter_cyc2time(&fep->tc, ts);
  7861. + spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  7862. +
  7863. + memset(hwtstamps, 0, sizeof(*hwtstamps));
  7864. + hwtstamps->hwtstamp = ns_to_ktime(ns);
  7865. +}
  7866. +
  7867. +static void noinline
  7868. fec_enet_tx(struct net_device *ndev)
  7869. {
  7870. - struct fec_enet_private *fep;
  7871. - struct bufdesc *bdp;
  7872. - unsigned short status;
  7873. + struct fec_enet_private *fep = netdev_priv(ndev);
  7874. + union bufdesc_u *bdp;
  7875. struct sk_buff *skb;
  7876. - int index = 0;
  7877. -
  7878. - fep = netdev_priv(ndev);
  7879. - bdp = fep->dirty_tx;
  7880. + unsigned index = fep->tx_dirty;
  7881. + unsigned pkts_compl, bytes_compl;
  7882. - /* get next bdp of dirty_tx */
  7883. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7884. + pkts_compl = bytes_compl = 0;
  7885. + do {
  7886. + unsigned status, cbd_esc;
  7887. - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
  7888. + if (++index >= fep->tx_ring_size)
  7889. + index = 0;
  7890. /* current queue is empty */
  7891. - if (bdp == fep->cur_tx)
  7892. + if (index == fep->tx_next)
  7893. break;
  7894. - if (fep->bufdesc_ex)
  7895. - index = (struct bufdesc_ex *)bdp -
  7896. - (struct bufdesc_ex *)fep->tx_bd_base;
  7897. - else
  7898. - index = bdp - fep->tx_bd_base;
  7899. + bdp = fec_enet_tx_get(index, fep);
  7900. +
  7901. + status = bdp->bd.cbd_sc;
  7902. + if (status & BD_ENET_TX_READY)
  7903. + break;
  7904. +
  7905. + fec_enet_tx_unmap(index, bdp, fep);
  7906. skb = fep->tx_skbuff[index];
  7907. - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
  7908. - DMA_TO_DEVICE);
  7909. - bdp->cbd_bufaddr = 0;
  7910. + fep->tx_skbuff[index] = NULL;
  7911. /* Check for errors. */
  7912. - if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
  7913. - BD_ENET_TX_RL | BD_ENET_TX_UN |
  7914. - BD_ENET_TX_CSL)) {
  7915. - ndev->stats.tx_errors++;
  7916. - if (status & BD_ENET_TX_HB) /* No heartbeat */
  7917. - ndev->stats.tx_heartbeat_errors++;
  7918. - if (status & BD_ENET_TX_LC) /* Late collision */
  7919. - ndev->stats.tx_window_errors++;
  7920. - if (status & BD_ENET_TX_RL) /* Retrans limit */
  7921. - ndev->stats.tx_aborted_errors++;
  7922. - if (status & BD_ENET_TX_UN) /* Underrun */
  7923. - ndev->stats.tx_fifo_errors++;
  7924. - if (status & BD_ENET_TX_CSL) /* Carrier lost */
  7925. - ndev->stats.tx_carrier_errors++;
  7926. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  7927. + cbd_esc = bdp->ebd.cbd_esc;
  7928. + if (cbd_esc & BD_ENET_TX_TXE) {
  7929. + ndev->stats.tx_errors++;
  7930. + if (cbd_esc & BD_ENET_TX_EE) { /* excess collision */
  7931. + ndev->stats.collisions += 16;
  7932. + ndev->stats.tx_aborted_errors++;
  7933. + }
  7934. + if (cbd_esc & BD_ENET_TX_LCE) /* late collision error */
  7935. + ndev->stats.tx_window_errors++;
  7936. + if (cbd_esc & (BD_ENET_TX_UE | BD_ENET_TX_FE | BD_ENET_TX_OE))
  7937. + ndev->stats.tx_fifo_errors++;
  7938. + goto next;
  7939. + }
  7940. } else {
  7941. - ndev->stats.tx_packets++;
  7942. - ndev->stats.tx_bytes += bdp->cbd_datlen;
  7943. + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
  7944. + BD_ENET_TX_RL | BD_ENET_TX_UN |
  7945. + BD_ENET_TX_CSL)) {
  7946. + ndev->stats.tx_errors++;
  7947. + if (status & BD_ENET_TX_HB) /* No heartbeat */
  7948. + ndev->stats.tx_heartbeat_errors++;
  7949. + if (status & BD_ENET_TX_LC) /* Late collision */
  7950. + ndev->stats.tx_window_errors++;
  7951. + if (status & BD_ENET_TX_RL) /* Retrans limit */
  7952. + ndev->stats.tx_aborted_errors++;
  7953. + if (status & BD_ENET_TX_UN) /* Underrun */
  7954. + ndev->stats.tx_fifo_errors++;
  7955. + if (status & BD_ENET_TX_CSL) /* Carrier lost */
  7956. + ndev->stats.tx_carrier_errors++;
  7957. + goto next;
  7958. + }
  7959. }
  7960. - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
  7961. - fep->bufdesc_ex) {
  7962. - struct skb_shared_hwtstamps shhwtstamps;
  7963. - unsigned long flags;
  7964. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  7965. -
  7966. - memset(&shhwtstamps, 0, sizeof(shhwtstamps));
  7967. - spin_lock_irqsave(&fep->tmreg_lock, flags);
  7968. - shhwtstamps.hwtstamp = ns_to_ktime(
  7969. - timecounter_cyc2time(&fep->tc, ebdp->ts));
  7970. - spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  7971. - skb_tstamp_tx(skb, &shhwtstamps);
  7972. + if (skb) {
  7973. + ndev->stats.tx_packets++;
  7974. + ndev->stats.tx_bytes += skb->len;
  7975. }
  7976. - if (status & BD_ENET_TX_READY)
  7977. - netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
  7978. -
  7979. /* Deferred means some collisions occurred during transmit,
  7980. * but we eventually sent the packet OK.
  7981. */
  7982. if (status & BD_ENET_TX_DEF)
  7983. ndev->stats.collisions++;
  7984. + next:
  7985. + if (skb) {
  7986. + if (fep->flags & FEC_FLAG_BUFDESC_EX &&
  7987. + unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
  7988. + struct skb_shared_hwtstamps shhwtstamps;
  7989. - /* Free the sk buffer associated with this last transmit */
  7990. - dev_kfree_skb_any(skb);
  7991. - fep->tx_skbuff[index] = NULL;
  7992. + fec_enet_hwtstamp(fep, bdp->ebd.ts, &shhwtstamps);
  7993. + skb_tstamp_tx(skb, &shhwtstamps);
  7994. + }
  7995. - fep->dirty_tx = bdp;
  7996. + pkts_compl++;
  7997. + bytes_compl += skb->len;
  7998. - /* Update pointer to next buffer descriptor to be transmitted */
  7999. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8000. + /* Free the sk buffer associated with this last transmit */
  8001. + dev_kfree_skb_any(skb);
  8002. + }
  8003. - /* Since we have freed up a buffer, the ring is no longer full
  8004. - */
  8005. - if (fep->dirty_tx != fep->cur_tx) {
  8006. - if (netif_queue_stopped(ndev))
  8007. - netif_wake_queue(ndev);
  8008. + fep->tx_dirty = index;
  8009. + } while (1);
  8010. +
  8011. + netdev_completed_queue(ndev, pkts_compl, bytes_compl);
  8012. +
8013. + /* ERR006358: Keep the transmitter going */
  8014. + if (index != fep->tx_next && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
  8015. + writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  8016. +
  8017. + if (netif_queue_stopped(ndev) &&
  8018. + ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) >=
  8019. + fep->tx_min)
  8020. + netif_wake_queue(ndev);
  8021. +}
  8022. +
  8023. +
  8024. +static void
  8025. +fec_enet_receive(struct sk_buff *skb, union bufdesc_u *bdp, struct net_device *ndev)
  8026. +{
  8027. + struct fec_enet_private *fep = netdev_priv(ndev);
  8028. +
  8029. + skb->protocol = eth_type_trans(skb, ndev);
  8030. +
  8031. + /* Get receive timestamp from the skb */
  8032. + if (fep->hwts_rx_en && fep->flags & FEC_FLAG_BUFDESC_EX)
  8033. + fec_enet_hwtstamp(fep, bdp->ebd.ts, skb_hwtstamps(skb));
  8034. +
  8035. + if (fep->flags & FEC_FLAG_RX_CSUM) {
  8036. + if (!(bdp->ebd.cbd_esc & FLAG_RX_CSUM_ERROR)) {
  8037. + /* don't check it */
  8038. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  8039. + } else {
  8040. + skb_checksum_none_assert(skb);
  8041. }
  8042. }
  8043. - return;
  8044. +
  8045. + napi_gro_receive(&fep->napi, skb);
  8046. +}
  8047. +
  8048. +static void
  8049. +fec_enet_receive_copy(unsigned pkt_len, unsigned index, union bufdesc_u *bdp, struct net_device *ndev)
  8050. +{
  8051. + struct fec_enet_private *fep = netdev_priv(ndev);
  8052. + struct sk_buff *skb;
  8053. + unsigned char *data;
  8054. + bool vlan_packet_rcvd = false;
  8055. +
  8056. + /*
  8057. + * Detect the presence of the VLAN tag, and adjust
  8058. + * the packet length appropriately.
  8059. + */
  8060. + if (fep->flags & FEC_FLAG_RX_VLAN &&
  8061. + bdp->ebd.cbd_esc & BD_ENET_RX_VLAN) {
  8062. + pkt_len -= VLAN_HLEN;
  8063. + vlan_packet_rcvd = true;
  8064. + }
  8065. +
  8066. + /* This does 16 byte alignment, exactly what we need. */
  8067. + skb = netdev_alloc_skb(ndev, pkt_len + NET_IP_ALIGN);
  8068. + if (unlikely(!skb)) {
  8069. + ndev->stats.rx_dropped++;
  8070. + return;
  8071. + }
  8072. +
  8073. + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8074. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8075. +
  8076. + data = fep->rx_skbuff[index]->data;
  8077. +
  8078. +#ifndef CONFIG_M5272
  8079. + /*
  8080. + * If we have enabled this feature, we need to discard
  8081. + * the two bytes at the beginning of the packet before
  8082. + * copying it.
  8083. + */
  8084. + if (fep->quirks & FEC_QUIRK_RX_SHIFT16) {
  8085. + pkt_len -= 2;
  8086. + data += 2;
  8087. + }
  8088. +#endif
  8089. +
  8090. + if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  8091. + swap_buffer(data, pkt_len);
  8092. +
  8093. + skb_reserve(skb, NET_IP_ALIGN);
  8094. + skb_put(skb, pkt_len); /* Make room */
  8095. +
  8096. + /* If this is a VLAN packet remove the VLAN Tag */
  8097. + if (vlan_packet_rcvd) {
  8098. + struct vlan_hdr *vlan = (struct vlan_hdr *)(data + ETH_HLEN);
  8099. +
  8100. + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  8101. + ntohs(vlan->h_vlan_TCI));
  8102. +
  8103. + /* Extract the frame data without the VLAN header. */
  8104. + skb_copy_to_linear_data(skb, data, 2 * ETH_ALEN);
  8105. + skb_copy_to_linear_data_offset(skb, 2 * ETH_ALEN,
  8106. + data + 2 * ETH_ALEN + VLAN_HLEN,
  8107. + pkt_len - 2 * ETH_ALEN);
  8108. + } else {
  8109. + skb_copy_to_linear_data(skb, data, pkt_len);
  8110. + }
  8111. +
  8112. + dma_sync_single_for_device(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8113. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8114. +
  8115. + fec_enet_receive(skb, bdp, ndev);
  8116. }
  8117. +static void
  8118. +fec_enet_receive_nocopy(unsigned pkt_len, unsigned index, union bufdesc_u *bdp,
  8119. + struct net_device *ndev)
  8120. +{
  8121. + struct fec_enet_private *fep = netdev_priv(ndev);
  8122. + struct sk_buff *skb, *skb_new;
  8123. + unsigned char *data;
  8124. + dma_addr_t addr;
  8125. +
  8126. + skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
  8127. + if (!skb_new) {
  8128. + ndev->stats.rx_dropped++;
  8129. + return;
  8130. + }
  8131. +
  8132. + addr = dma_map_single(&fep->pdev->dev, skb_new->data,
  8133. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8134. + if (dma_mapping_error(&fep->pdev->dev, addr)) {
  8135. + dev_kfree_skb(skb_new);
  8136. + ndev->stats.rx_dropped++;
  8137. + return;
  8138. + }
  8139. -/* During a receive, the cur_rx points to the current incoming buffer.
  8140. + /* We have the new skb, so proceed to deal with the received data. */
  8141. + dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8142. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8143. +
  8144. + skb = fep->rx_skbuff[index];
  8145. +
8146. + /* Now substitute in the new skb */
  8147. + fep->rx_skbuff[index] = skb_new;
  8148. + bdp->bd.cbd_bufaddr = addr;
  8149. +
  8150. + /*
  8151. + * Update the skb length according to the raw packet length.
  8152. + * Then remove the two bytes of additional padding.
  8153. + */
  8154. + skb_put(skb, pkt_len);
  8155. + data = skb_pull_inline(skb, 2);
  8156. +
  8157. + if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  8158. + swap_buffer(data, skb->len);
  8159. +
  8160. + /*
  8161. + * Now juggle things for the VLAN tag - if the hardware
  8162. + * flags this as present, we need to read the tag, and
  8163. + * then shuffle the ethernet addresses up.
  8164. + */
  8165. + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  8166. + bdp->ebd.cbd_esc & BD_ENET_RX_VLAN) {
  8167. + struct vlan_hdr *vlan = (struct vlan_hdr *)(data + ETH_HLEN);
  8168. +
  8169. + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  8170. + ntohs(vlan->h_vlan_TCI));
  8171. +
  8172. + memmove(data + VLAN_HLEN, data, 2 * ETH_ALEN);
  8173. + skb_pull_inline(skb, VLAN_HLEN);
  8174. + }
  8175. +
  8176. + fec_enet_receive(skb, bdp, ndev);
  8177. +}
  8178. +
  8179. +/* During a receive, the rx_next points to the current incoming buffer.
  8180. * When we update through the ring, if the next incoming buffer has
  8181. * not been given to the system, we just set the empty indicator,
  8182. * effectively tossing the packet.
  8183. @@ -852,18 +1109,9 @@
  8184. fec_enet_rx(struct net_device *ndev, int budget)
  8185. {
  8186. struct fec_enet_private *fep = netdev_priv(ndev);
  8187. - const struct platform_device_id *id_entry =
  8188. - platform_get_device_id(fep->pdev);
  8189. - struct bufdesc *bdp;
  8190. - unsigned short status;
  8191. - struct sk_buff *skb;
  8192. ushort pkt_len;
  8193. - __u8 *data;
  8194. int pkt_received = 0;
  8195. - struct bufdesc_ex *ebdp = NULL;
  8196. - bool vlan_packet_rcvd = false;
  8197. - u16 vlan_tag;
  8198. - int index = 0;
  8199. + unsigned index = fep->rx_next;
  8200. #ifdef CONFIG_M532x
  8201. flush_cache_all();
  8202. @@ -872,12 +1120,17 @@
  8203. /* First, grab all of the stats for the incoming packet.
  8204. * These get messed up if we get called due to a busy condition.
  8205. */
  8206. - bdp = fep->cur_rx;
  8207. + do {
  8208. + union bufdesc_u *bdp = fec_enet_rx_get(index, fep);
  8209. + unsigned status;
  8210. - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
  8211. + status = bdp->bd.cbd_sc;
  8212. + if (status & BD_ENET_RX_EMPTY)
  8213. + break;
  8214. if (pkt_received >= budget)
  8215. break;
  8216. +
  8217. pkt_received++;
  8218. /* Since we have allocated space to hold a complete frame,
  8219. @@ -886,155 +1139,81 @@
  8220. if ((status & BD_ENET_RX_LAST) == 0)
  8221. netdev_err(ndev, "rcv is not +last\n");
  8222. - if (!fep->opened)
  8223. - goto rx_processing_done;
  8224. + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
  8225. /* Check for errors. */
  8226. - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
  8227. - BD_ENET_RX_CR | BD_ENET_RX_OV)) {
  8228. + if (status & BD_ENET_RX_ERROR) {
  8229. ndev->stats.rx_errors++;
  8230. - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
  8231. - /* Frame too long or too short. */
  8232. - ndev->stats.rx_length_errors++;
  8233. - }
  8234. - if (status & BD_ENET_RX_NO) /* Frame alignment */
  8235. - ndev->stats.rx_frame_errors++;
  8236. - if (status & BD_ENET_RX_CR) /* CRC Error */
  8237. - ndev->stats.rx_crc_errors++;
  8238. - if (status & BD_ENET_RX_OV) /* FIFO overrun */
  8239. - ndev->stats.rx_fifo_errors++;
  8240. - }
  8241. - /* Report late collisions as a frame error.
  8242. - * On this error, the BD is closed, but we don't know what we
  8243. - * have in the buffer. So, just drop this frame on the floor.
  8244. - */
  8245. - if (status & BD_ENET_RX_CL) {
  8246. - ndev->stats.rx_errors++;
  8247. - ndev->stats.rx_frame_errors++;
  8248. + /*
  8249. + * Report late collisions as a frame error. On this
  8250. + * error, the BD is closed, but we don't know what we
  8251. + * have in the buffer. So, just drop this frame on
  8252. + * the floor.
  8253. + */
  8254. + if (status & BD_ENET_RX_CL) {
  8255. + ndev->stats.rx_frame_errors++;
  8256. + } else {
  8257. + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH))
  8258. + /* Frame too long or too short. */
  8259. + ndev->stats.rx_length_errors++;
  8260. + if (status & BD_ENET_RX_NO) /* Frame alignment */
  8261. + ndev->stats.rx_frame_errors++;
  8262. + if (status & BD_ENET_RX_CR) /* CRC Error */
  8263. + ndev->stats.rx_crc_errors++;
  8264. + if (status & BD_ENET_RX_OV) /* FIFO overrun */
  8265. + ndev->stats.rx_fifo_errors++;
  8266. + }
  8267. goto rx_processing_done;
  8268. }
  8269. /* Process the incoming frame. */
  8270. ndev->stats.rx_packets++;
  8271. - pkt_len = bdp->cbd_datlen;
  8272. - ndev->stats.rx_bytes += pkt_len;
  8273. -
  8274. - if (fep->bufdesc_ex)
  8275. - index = (struct bufdesc_ex *)bdp -
  8276. - (struct bufdesc_ex *)fep->rx_bd_base;
  8277. - else
  8278. - index = bdp - fep->rx_bd_base;
  8279. - data = fep->rx_skbuff[index]->data;
  8280. - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
  8281. - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8282. -
  8283. - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
  8284. - swap_buffer(data, pkt_len);
  8285. - /* Extract the enhanced buffer descriptor */
  8286. - ebdp = NULL;
  8287. - if (fep->bufdesc_ex)
  8288. - ebdp = (struct bufdesc_ex *)bdp;
  8289. -
  8290. - /* If this is a VLAN packet remove the VLAN Tag */
  8291. - vlan_packet_rcvd = false;
  8292. - if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  8293. - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
  8294. - /* Push and remove the vlan tag */
  8295. - struct vlan_hdr *vlan_header =
  8296. - (struct vlan_hdr *) (data + ETH_HLEN);
  8297. - vlan_tag = ntohs(vlan_header->h_vlan_TCI);
  8298. - pkt_len -= VLAN_HLEN;
  8299. -
  8300. - vlan_packet_rcvd = true;
  8301. - }
  8302. -
  8303. - /* This does 16 byte alignment, exactly what we need.
  8304. - * The packet length includes FCS, but we don't want to
  8305. - * include that when passing upstream as it messes up
  8306. - * bridging applications.
  8307. + /*
  8308. + * The packet length includes FCS, but we don't want
  8309. + * to include that when passing upstream as it messes
  8310. + * up bridging applications.
  8311. */
  8312. - skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
  8313. + pkt_len = bdp->bd.cbd_datlen - 4;
  8314. + ndev->stats.rx_bytes += pkt_len;
  8315. - if (unlikely(!skb)) {
  8316. - ndev->stats.rx_dropped++;
  8317. + if (fec_enet_rx_zerocopy(fep, pkt_len)) {
  8318. + fec_enet_receive_nocopy(pkt_len, index, bdp, ndev);
  8319. } else {
  8320. - int payload_offset = (2 * ETH_ALEN);
  8321. - skb_reserve(skb, NET_IP_ALIGN);
  8322. - skb_put(skb, pkt_len - 4); /* Make room */
  8323. -
  8324. - /* Extract the frame data without the VLAN header. */
  8325. - skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
  8326. - if (vlan_packet_rcvd)
  8327. - payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
  8328. - skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
  8329. - data + payload_offset,
  8330. - pkt_len - 4 - (2 * ETH_ALEN));
  8331. -
  8332. - skb->protocol = eth_type_trans(skb, ndev);
  8333. -
  8334. - /* Get receive timestamp from the skb */
  8335. - if (fep->hwts_rx_en && fep->bufdesc_ex) {
  8336. - struct skb_shared_hwtstamps *shhwtstamps =
  8337. - skb_hwtstamps(skb);
  8338. - unsigned long flags;
  8339. -
  8340. - memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  8341. -
  8342. - spin_lock_irqsave(&fep->tmreg_lock, flags);
  8343. - shhwtstamps->hwtstamp = ns_to_ktime(
  8344. - timecounter_cyc2time(&fep->tc, ebdp->ts));
  8345. - spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  8346. - }
  8347. -
  8348. - if (fep->bufdesc_ex &&
  8349. - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
  8350. - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
  8351. - /* don't check it */
  8352. - skb->ip_summed = CHECKSUM_UNNECESSARY;
  8353. - } else {
  8354. - skb_checksum_none_assert(skb);
  8355. - }
  8356. - }
  8357. -
  8358. - /* Handle received VLAN packets */
  8359. - if (vlan_packet_rcvd)
  8360. - __vlan_hwaccel_put_tag(skb,
  8361. - htons(ETH_P_8021Q),
  8362. - vlan_tag);
  8363. -
  8364. - napi_gro_receive(&fep->napi, skb);
  8365. + fec_enet_receive_copy(pkt_len, index, bdp, ndev);
  8366. }
  8367. - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
  8368. - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8369. rx_processing_done:
  8370. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  8371. + bdp->ebd.cbd_esc = BD_ENET_RX_INT;
  8372. + bdp->ebd.cbd_prot = 0;
  8373. + bdp->ebd.cbd_bdu = 0;
  8374. + }
  8375. +
  8376. + /*
  8377. + * Ensure that the previous writes have completed before
  8378. + * the status update becomes visible.
  8379. + */
  8380. + wmb();
  8381. +
  8382. /* Clear the status flags for this buffer */
  8383. status &= ~BD_ENET_RX_STATS;
  8384. /* Mark the buffer empty */
  8385. status |= BD_ENET_RX_EMPTY;
  8386. - bdp->cbd_sc = status;
  8387. -
  8388. - if (fep->bufdesc_ex) {
  8389. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  8390. -
  8391. - ebdp->cbd_esc = BD_ENET_RX_INT;
  8392. - ebdp->cbd_prot = 0;
  8393. - ebdp->cbd_bdu = 0;
  8394. - }
  8395. -
  8396. - /* Update BD pointer to next entry */
  8397. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8398. + bdp->bd.cbd_sc = status;
  8399. /* Doing this here will keep the FEC running while we process
  8400. * incoming frames. On a heavily loaded network, we should be
  8401. * able to keep up at the expense of system resources.
  8402. */
  8403. writel(0, fep->hwp + FEC_R_DES_ACTIVE);
  8404. - }
  8405. - fep->cur_rx = bdp;
  8406. +
  8407. + if (++index >= fep->rx_ring_size)
  8408. + index = 0;
  8409. + } while (1);
  8410. + fep->rx_next = index;
  8411. return pkt_received;
  8412. }
  8413. @@ -1044,29 +1223,25 @@
  8414. {
  8415. struct net_device *ndev = dev_id;
  8416. struct fec_enet_private *fep = netdev_priv(ndev);
  8417. + const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
  8418. uint int_events;
  8419. irqreturn_t ret = IRQ_NONE;
  8420. - do {
  8421. - int_events = readl(fep->hwp + FEC_IEVENT);
  8422. - writel(int_events, fep->hwp + FEC_IEVENT);
  8423. + int_events = readl(fep->hwp + FEC_IEVENT);
  8424. + writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
  8425. - if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
  8426. - ret = IRQ_HANDLED;
  8427. + if (int_events & napi_mask) {
  8428. + ret = IRQ_HANDLED;
  8429. - /* Disable the RX interrupt */
  8430. - if (napi_schedule_prep(&fep->napi)) {
  8431. - writel(FEC_RX_DISABLED_IMASK,
  8432. - fep->hwp + FEC_IMASK);
  8433. - __napi_schedule(&fep->napi);
  8434. - }
  8435. - }
  8436. + /* Disable the NAPI interrupts */
  8437. + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
  8438. + napi_schedule(&fep->napi);
  8439. + }
  8440. - if (int_events & FEC_ENET_MII) {
  8441. - ret = IRQ_HANDLED;
  8442. - complete(&fep->mdio_done);
  8443. - }
  8444. - } while (int_events);
  8445. + if (int_events & FEC_ENET_MII) {
  8446. + ret = IRQ_HANDLED;
  8447. + complete(&fep->mdio_done);
  8448. + }
  8449. return ret;
  8450. }
  8451. @@ -1074,8 +1249,16 @@
  8452. static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
  8453. {
  8454. struct net_device *ndev = napi->dev;
  8455. - int pkts = fec_enet_rx(ndev, budget);
  8456. struct fec_enet_private *fep = netdev_priv(ndev);
  8457. + int pkts;
  8458. +
  8459. + /*
  8460. + * Clear any pending transmit or receive interrupts before
  8461. + * processing the rings to avoid racing with the hardware.
  8462. + */
  8463. + writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
  8464. +
  8465. + pkts = fec_enet_rx(ndev, budget);
  8466. fec_enet_tx(ndev);
  8467. @@ -1173,26 +1356,78 @@
  8468. return;
  8469. }
  8470. - if (phy_dev->link) {
  8471. + /*
  8472. + * If the netdev is down, or is going down, we're not interested
  8473. + * in link state events, so just mark our idea of the link as down
  8474. + * and ignore the event.
  8475. + */
  8476. + if (!netif_running(ndev) || !netif_device_present(ndev)) {
  8477. + fep->link = 0;
  8478. + } else if (phy_dev->link) {
  8479. if (!fep->link) {
  8480. fep->link = phy_dev->link;
  8481. status_change = 1;
  8482. }
  8483. - if (fep->full_duplex != phy_dev->duplex)
  8484. + if (fep->full_duplex != phy_dev->duplex) {
  8485. + fep->full_duplex = phy_dev->duplex;
  8486. status_change = 1;
  8487. + }
  8488. if (phy_dev->speed != fep->speed) {
  8489. fep->speed = phy_dev->speed;
  8490. status_change = 1;
  8491. }
  8492. + if (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) {
  8493. + u32 lcl_adv = phy_dev->advertising;
  8494. + u32 rmt_adv = phy_dev->lp_advertising;
  8495. + unsigned mode = 0;
  8496. +
  8497. + if (lcl_adv & rmt_adv & ADVERTISED_Pause) {
  8498. + /*
8499. + * Local Device    Link Partner
8500. + *  Pause AsymDir  Pause AsymDir  Result
8501. + *    1      X       1      X     TX+RX
  8502. + */
  8503. + mode = FEC_PAUSE_FLAG_TX | FEC_PAUSE_FLAG_RX;
  8504. + } else if (lcl_adv & rmt_adv & ADVERTISED_Asym_Pause) {
  8505. + /*
8506. + *    0      1       1      1     RX
8507. + *    1      1       0      1     TX
  8508. + */
  8509. + if (rmt_adv & ADVERTISED_Pause)
  8510. + mode = FEC_PAUSE_FLAG_RX;
  8511. + else
  8512. + mode = FEC_PAUSE_FLAG_TX;
  8513. + }
  8514. +
  8515. + if (mode != fep->pause_mode) {
  8516. + fep->pause_mode = mode;
  8517. + status_change = 1;
  8518. + }
  8519. + }
  8520. +
  8521. /* if any of the above changed restart the FEC */
  8522. - if (status_change)
  8523. - fec_restart(ndev, phy_dev->duplex);
  8524. + if (status_change) {
  8525. + mutex_lock(&fep->mutex);
  8526. + napi_disable(&fep->napi);
  8527. + netif_tx_lock_bh(ndev);
  8528. + fec_restart(ndev);
  8529. + netif_wake_queue(ndev);
  8530. + netif_tx_unlock_bh(ndev);
  8531. + napi_enable(&fep->napi);
  8532. + mutex_unlock(&fep->mutex);
  8533. + }
  8534. } else {
  8535. if (fep->link) {
  8536. + mutex_lock(&fep->mutex);
  8537. + napi_disable(&fep->napi);
  8538. + netif_tx_lock_bh(ndev);
  8539. fec_stop(ndev);
  8540. + netif_tx_unlock_bh(ndev);
  8541. + napi_enable(&fep->napi);
  8542. + mutex_unlock(&fep->mutex);
  8543. fep->link = phy_dev->link;
  8544. status_change = 1;
  8545. }
  8546. @@ -1202,23 +1437,35 @@
  8547. phy_print_status(phy_dev);
  8548. }
  8549. -static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  8550. +static unsigned long fec_enet_mdio_op(struct fec_enet_private *fep,
  8551. + unsigned data)
  8552. {
  8553. - struct fec_enet_private *fep = bus->priv;
  8554. unsigned long time_left;
  8555. fep->mii_timeout = 0;
  8556. init_completion(&fep->mdio_done);
  8557. - /* start a read op */
  8558. - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
  8559. - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8560. - FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
  8561. + mutex_lock(&fep->mutex);
  8562. +
  8563. + /* start operation */
  8564. + writel(data, fep->hwp + FEC_MII_DATA);
  8565. /* wait for end of transfer */
  8566. time_left = wait_for_completion_timeout(&fep->mdio_done,
  8567. usecs_to_jiffies(FEC_MII_TIMEOUT));
  8568. - if (time_left == 0) {
  8569. +
  8570. + mutex_unlock(&fep->mutex);
  8571. +
  8572. + return time_left;
  8573. +}
  8574. +
  8575. +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  8576. +{
  8577. + struct fec_enet_private *fep = bus->priv;
  8578. +
  8579. + if (fec_enet_mdio_op(fep, FEC_MMFR_ST | FEC_MMFR_OP_READ |
  8580. + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8581. + FEC_MMFR_TA) == 0) {
  8582. fep->mii_timeout = 1;
  8583. netdev_err(fep->netdev, "MDIO read timeout\n");
  8584. return -ETIMEDOUT;
  8585. @@ -1232,21 +1479,10 @@
  8586. u16 value)
  8587. {
  8588. struct fec_enet_private *fep = bus->priv;
  8589. - unsigned long time_left;
  8590. -
  8591. - fep->mii_timeout = 0;
  8592. - init_completion(&fep->mdio_done);
  8593. -
  8594. - /* start a write op */
  8595. - writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
  8596. - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8597. - FEC_MMFR_TA | FEC_MMFR_DATA(value),
  8598. - fep->hwp + FEC_MII_DATA);
  8599. - /* wait for end of transfer */
  8600. - time_left = wait_for_completion_timeout(&fep->mdio_done,
  8601. - usecs_to_jiffies(FEC_MII_TIMEOUT));
  8602. - if (time_left == 0) {
  8603. + if (fec_enet_mdio_op(fep, FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
  8604. + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8605. + FEC_MMFR_TA | FEC_MMFR_DATA(value)) == 0) {
  8606. fep->mii_timeout = 1;
  8607. netdev_err(fep->netdev, "MDIO write timeout\n");
  8608. return -ETIMEDOUT;
  8609. @@ -1255,11 +1491,37 @@
  8610. return 0;
  8611. }
  8612. +static void fec_enet_phy_config(struct net_device *ndev)
  8613. +{
  8614. +#ifndef CONFIG_M5272
  8615. + struct fec_enet_private *fep = netdev_priv(ndev);
  8616. + struct phy_device *phy = fep->phy_dev;
  8617. + unsigned pause = 0;
  8618. +
  8619. + /*
8620. + * Pause advertisement logic is weird. We don't advertise the raw
8621. + * "can tx" and "can rx" modes, but instead whether we support
8622. + * symmetric or asymmetric flow control.
  8623. + *
  8624. + * Symmetric flow means we can only support both transmit and receive
  8625. + * flow control frames together. Asymmetric flow means we can
  8626. + * independently control each. Note that there is no bit encoding
  8627. + * for "I can only receive flow control frames."
  8628. + */
  8629. + if (fep->pause_flag & FEC_PAUSE_FLAG_RX)
  8630. + pause |= ADVERTISED_Asym_Pause | ADVERTISED_Pause;
  8631. + if (fep->pause_flag & FEC_PAUSE_FLAG_TX)
  8632. + pause |= ADVERTISED_Asym_Pause;
  8633. +
  8634. + pause &= phy->supported;
  8635. + pause |= phy->advertising & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
  8636. + phy->advertising = pause;
  8637. +#endif
  8638. +}
  8639. +
  8640. static int fec_enet_mii_probe(struct net_device *ndev)
  8641. {
  8642. struct fec_enet_private *fep = netdev_priv(ndev);
  8643. - const struct platform_device_id *id_entry =
  8644. - platform_get_device_id(fep->pdev);
  8645. struct phy_device *phy_dev = NULL;
  8646. char mdio_bus_id[MII_BUS_ID_SIZE];
  8647. char phy_name[MII_BUS_ID_SIZE + 3];
  8648. @@ -1297,10 +1559,11 @@
  8649. }
  8650. /* mask with MAC supported features */
  8651. - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
  8652. + if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
  8653. phy_dev->supported &= PHY_GBIT_FEATURES;
  8654. + phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
  8655. #if !defined(CONFIG_M5272)
  8656. - phy_dev->supported |= SUPPORTED_Pause;
  8657. + phy_dev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  8658. #endif
  8659. }
  8660. else
  8661. @@ -1312,6 +1575,8 @@
  8662. fep->link = 0;
  8663. fep->full_duplex = 0;
  8664. + fec_enet_phy_config(ndev);
  8665. +
  8666. netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
  8667. fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
  8668. fep->phy_dev->irq);
  8669. @@ -1324,8 +1589,6 @@
  8670. static struct mii_bus *fec0_mii_bus;
  8671. struct net_device *ndev = platform_get_drvdata(pdev);
  8672. struct fec_enet_private *fep = netdev_priv(ndev);
  8673. - const struct platform_device_id *id_entry =
  8674. - platform_get_device_id(fep->pdev);
  8675. int err = -ENXIO, i;
  8676. /*
  8677. @@ -1344,7 +1607,7 @@
  8678. * mdio interface in board design, and need to be configured by
  8679. * fec0 mii_bus.
  8680. */
  8681. - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
  8682. + if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
  8683. /* fec1 uses fec0 mii_bus */
  8684. if (mii_cnt && fec0_mii_bus) {
  8685. fep->mii_bus = fec0_mii_bus;
  8686. @@ -1365,7 +1628,7 @@
  8687. * document.
  8688. */
  8689. fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
  8690. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
  8691. + if (fep->quirks & FEC_QUIRK_ENET_MAC)
  8692. fep->phy_speed--;
  8693. fep->phy_speed <<= 1;
  8694. writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
  8695. @@ -1399,7 +1662,7 @@
  8696. mii_cnt++;
  8697. /* save fec0 mii_bus */
  8698. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
  8699. + if (fep->quirks & FEC_QUIRK_ENET_MAC)
  8700. fec0_mii_bus = fep->mii_bus;
  8701. return 0;
  8702. @@ -1461,7 +1724,7 @@
  8703. {
  8704. struct fec_enet_private *fep = netdev_priv(ndev);
  8705. - if (fep->bufdesc_ex) {
  8706. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  8707. info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
  8708. SOF_TIMESTAMPING_RX_SOFTWARE |
  8709. @@ -1485,6 +1748,51 @@
  8710. }
  8711. }
  8712. +static void fec_enet_get_ringparam(struct net_device *ndev,
  8713. + struct ethtool_ringparam *ring)
  8714. +{
  8715. + struct fec_enet_private *fep = netdev_priv(ndev);
  8716. +
  8717. + ring->rx_max_pending = RX_RING_SIZE;
  8718. + ring->tx_max_pending = TX_RING_SIZE;
  8719. + ring->rx_pending = fep->rx_ring_size;
  8720. + ring->tx_pending = fep->tx_ring_size;
  8721. +}
  8722. +
  8723. +static int fec_enet_set_ringparam(struct net_device *ndev,
  8724. + struct ethtool_ringparam *ring)
  8725. +{
  8726. + struct fec_enet_private *fep = netdev_priv(ndev);
  8727. + unsigned rx, tx, tx_min;
  8728. +
  8729. + tx_min = ndev->features & NETIF_F_SG ? TX_RING_SIZE_MIN_SG : 16;
  8730. +
  8731. + rx = clamp_t(u32, ring->rx_pending, 16, RX_RING_SIZE);
  8732. + tx = clamp_t(u32, ring->tx_pending, tx_min, TX_RING_SIZE);
  8733. +
  8734. + if (tx == fep->tx_ring_size && rx == fep->rx_ring_size)
  8735. + return 0;
  8736. +
  8737. + /* Setting the ring size while the interface is down is easy */
  8738. + if (!netif_running(ndev)) {
  8739. + fep->tx_ring_size = tx;
  8740. + fep->rx_ring_size = rx;
  8741. + } else {
  8742. + return -EINVAL;
  8743. +
  8744. + napi_disable(&fep->napi);
  8745. + netif_tx_lock_bh(ndev);
  8746. + fec_stop(ndev);
  8747. + /* reallocate ring */
  8748. + fec_restart(ndev);
  8749. + netif_wake_queue(ndev);
  8750. + netif_tx_unlock_bh(ndev);
  8751. + napi_enable(&fep->napi);
  8752. + }
  8753. +
  8754. + return 0;
  8755. +}
  8756. +
  8757. #if !defined(CONFIG_M5272)
  8758. static void fec_enet_get_pauseparam(struct net_device *ndev,
  8759. @@ -1493,42 +1801,81 @@
  8760. struct fec_enet_private *fep = netdev_priv(ndev);
  8761. pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
  8762. - pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
  8763. - pause->rx_pause = pause->tx_pause;
  8764. + pause->rx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_RX) != 0;
  8765. + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_TX) != 0;
  8766. }
  8767. static int fec_enet_set_pauseparam(struct net_device *ndev,
  8768. struct ethtool_pauseparam *pause)
  8769. {
  8770. struct fec_enet_private *fep = netdev_priv(ndev);
  8771. + unsigned pause_flag, changed;
  8772. + struct phy_device *phy = fep->phy_dev;
  8773. - if (pause->tx_pause != pause->rx_pause) {
  8774. - netdev_info(ndev,
  8775. - "hardware only support enable/disable both tx and rx");
  8776. + if (!phy)
  8777. + return -ENODEV;
  8778. + if (!(phy->supported & SUPPORTED_Pause))
  8779. + return -EINVAL;
  8780. + if (!(phy->supported & SUPPORTED_Asym_Pause) &&
  8781. + pause->rx_pause != pause->tx_pause)
  8782. return -EINVAL;
  8783. - }
  8784. - fep->pause_flag = 0;
  8785. + pause_flag = 0;
  8786. + if (pause->autoneg)
  8787. + pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
  8788. + if (pause->rx_pause)
  8789. + pause_flag |= FEC_PAUSE_FLAG_RX;
  8790. + if (pause->tx_pause)
  8791. + pause_flag |= FEC_PAUSE_FLAG_TX;
  8792. +
  8793. + changed = fep->pause_flag ^ pause_flag;
  8794. + fep->pause_flag = pause_flag;
  8795. +
8796. + /* configure the phy advertisement according to our new options */
  8797. + fec_enet_phy_config(ndev);
  8798. +
  8799. + if (changed) {
  8800. + if (pause_flag & FEC_PAUSE_FLAG_AUTONEG) {
  8801. + if (netif_running(ndev))
  8802. + phy_start_aneg(fep->phy_dev);
  8803. + } else {
  8804. + int adv, old_adv;
  8805. - /* tx pause must be same as rx pause */
  8806. - fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
  8807. - fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
  8808. -
  8809. - if (pause->rx_pause || pause->autoneg) {
  8810. - fep->phy_dev->supported |= ADVERTISED_Pause;
  8811. - fep->phy_dev->advertising |= ADVERTISED_Pause;
  8812. - } else {
  8813. - fep->phy_dev->supported &= ~ADVERTISED_Pause;
  8814. - fep->phy_dev->advertising &= ~ADVERTISED_Pause;
  8815. - }
  8816. + /*
8817. + * Even if we are not in autonegotiation mode, we
8818. + * still update the phy with our capabilities so
8819. + * our link partner can make the appropriate
8820. + * decision. PHYLIB provides no way to do this.
  8821. + */
  8822. + adv = phy_read(phy, MII_ADVERTISE);
  8823. + if (adv >= 0) {
  8824. + old_adv = adv;
  8825. + adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  8826. + if (phy->advertising & ADVERTISED_Pause)
  8827. + adv |= ADVERTISE_PAUSE_CAP;
  8828. + if (phy->advertising & ADVERTISED_Asym_Pause)
  8829. + adv |= ADVERTISE_PAUSE_ASYM;
  8830. - if (pause->autoneg) {
  8831. - if (netif_running(ndev))
  8832. - fec_stop(ndev);
  8833. - phy_start_aneg(fep->phy_dev);
  8834. + if (old_adv != adv)
  8835. + phy_write(phy, MII_ADVERTISE, adv);
  8836. + }
  8837. +
  8838. + /* Forced pause mode */
  8839. + fep->pause_mode = fep->pause_flag;
  8840. +
  8841. + if (netif_running(ndev)) {
  8842. + mutex_lock(&fep->mutex);
  8843. + napi_disable(&fep->napi);
  8844. + netif_tx_lock_bh(ndev);
  8845. + fec_stop(ndev);
  8846. + fec_restart(ndev);
  8847. + netif_wake_queue(ndev);
  8848. + netif_tx_unlock_bh(ndev);
  8849. + napi_enable(&fep->napi);
  8850. + mutex_unlock(&fep->mutex);
  8851. + }
  8852. + }
  8853. }
  8854. - if (netif_running(ndev))
  8855. - fec_restart(ndev, 0);
  8856. return 0;
  8857. }
  8858. @@ -1645,21 +1992,21 @@
  8859. }
  8860. static const struct ethtool_ops fec_enet_ethtool_ops = {
  8861. -#if !defined(CONFIG_M5272)
  8862. - .get_pauseparam = fec_enet_get_pauseparam,
  8863. - .set_pauseparam = fec_enet_set_pauseparam,
  8864. -#endif
  8865. .get_settings = fec_enet_get_settings,
  8866. .set_settings = fec_enet_set_settings,
  8867. .get_drvinfo = fec_enet_get_drvinfo,
  8868. - .get_link = ethtool_op_get_link,
  8869. - .get_ts_info = fec_enet_get_ts_info,
  8870. .nway_reset = fec_enet_nway_reset,
  8871. + .get_link = ethtool_op_get_link,
  8872. + .get_ringparam = fec_enet_get_ringparam,
  8873. + .set_ringparam = fec_enet_set_ringparam,
  8874. #ifndef CONFIG_M5272
  8875. - .get_ethtool_stats = fec_enet_get_ethtool_stats,
  8876. + .get_pauseparam = fec_enet_get_pauseparam,
  8877. + .set_pauseparam = fec_enet_set_pauseparam,
  8878. .get_strings = fec_enet_get_strings,
  8879. + .get_ethtool_stats = fec_enet_get_ethtool_stats,
  8880. .get_sset_count = fec_enet_get_sset_count,
  8881. #endif
  8882. + .get_ts_info = fec_enet_get_ts_info,
  8883. };
  8884. static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
  8885. @@ -1673,7 +2020,7 @@
  8886. if (!phydev)
  8887. return -ENODEV;
  8888. - if (fep->bufdesc_ex) {
  8889. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  8890. if (cmd == SIOCSHWTSTAMP)
  8891. return fec_ptp_set(ndev, rq);
  8892. if (cmd == SIOCGHWTSTAMP)
  8893. @@ -1688,23 +2035,33 @@
  8894. struct fec_enet_private *fep = netdev_priv(ndev);
  8895. unsigned int i;
  8896. struct sk_buff *skb;
  8897. - struct bufdesc *bdp;
  8898. + union bufdesc_u *bdp;
  8899. - bdp = fep->rx_bd_base;
  8900. for (i = 0; i < fep->rx_ring_size; i++) {
  8901. - skb = fep->rx_skbuff[i];
  8902. + bdp = fec_enet_rx_get(i, fep);
  8903. - if (bdp->cbd_bufaddr)
  8904. - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
  8905. + skb = fep->rx_skbuff[i];
  8906. + fep->rx_skbuff[i] = NULL;
  8907. + if (skb) {
  8908. + dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8909. FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8910. - if (skb)
  8911. dev_kfree_skb(skb);
  8912. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8913. + }
  8914. }
  8915. - bdp = fep->tx_bd_base;
  8916. - for (i = 0; i < fep->tx_ring_size; i++)
  8917. + for (i = 0; i < fep->tx_ring_size; i++) {
  8918. + bdp = fec_enet_tx_get(i, fep);
  8919. + if (bdp->bd.cbd_bufaddr)
  8920. + fec_enet_tx_unmap(i, bdp, fep);
  8921. kfree(fep->tx_bounce[i]);
  8922. + fep->tx_bounce[i] = NULL;
  8923. + skb = fep->tx_skbuff[i];
  8924. + fep->tx_skbuff[i] = NULL;
  8925. + if (skb)
  8926. + dev_kfree_skb(skb);
  8927. + }
  8928. +
  8929. + dma_free_coherent(NULL, PAGE_SIZE, fep->rx_bd_base, fep->rx_bd_dma);
  8930. }
  8931. static int fec_enet_alloc_buffers(struct net_device *ndev)
  8932. @@ -1712,59 +2069,82 @@
  8933. struct fec_enet_private *fep = netdev_priv(ndev);
  8934. unsigned int i;
  8935. struct sk_buff *skb;
  8936. - struct bufdesc *bdp;
  8937. + union bufdesc_u *bdp;
  8938. + union bufdesc_u *rx_cbd_cpu, *tx_cbd_cpu;
  8939. + dma_addr_t rx_cbd_dma, tx_cbd_dma;
  8940. +
  8941. + /* Allocate memory for buffer descriptors. */
  8942. + rx_cbd_cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &rx_cbd_dma,
  8943. + GFP_KERNEL);
  8944. + tx_cbd_cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &tx_cbd_dma,
  8945. + GFP_KERNEL);
  8946. + if (!rx_cbd_cpu || !tx_cbd_cpu) {
  8947. + if (rx_cbd_cpu)
  8948. + dma_free_coherent(NULL, PAGE_SIZE, rx_cbd_cpu, rx_cbd_dma);
  8949. + if (tx_cbd_cpu)
  8950. + dma_free_coherent(NULL, PAGE_SIZE, tx_cbd_cpu, tx_cbd_dma);
  8951. + return -ENOMEM;
  8952. + }
  8953. +
  8954. + memset(rx_cbd_cpu, 0, PAGE_SIZE);
  8955. + memset(tx_cbd_cpu, 0, PAGE_SIZE);
  8956. +
  8957. + /* Set receive and transmit descriptor base. */
  8958. + fep->rx_bd_base = rx_cbd_cpu;
  8959. + fep->rx_bd_dma = rx_cbd_dma;
  8960. + fep->tx_bd_base = tx_cbd_cpu;
  8961. + fep->tx_bd_dma = tx_cbd_dma;
  8962. - bdp = fep->rx_bd_base;
  8963. for (i = 0; i < fep->rx_ring_size; i++) {
  8964. + dma_addr_t addr;
  8965. +
  8966. skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
  8967. - if (!skb) {
  8968. - fec_enet_free_buffers(ndev);
  8969. - return -ENOMEM;
  8970. - }
  8971. - fep->rx_skbuff[i] = skb;
  8972. + if (!skb)
  8973. + goto err_alloc;
  8974. - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
  8975. + addr = dma_map_single(&fep->pdev->dev, skb->data,
  8976. FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8977. - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
  8978. - fec_enet_free_buffers(ndev);
  8979. + if (dma_mapping_error(&fep->pdev->dev, addr)) {
  8980. + dev_kfree_skb(skb);
  8981. if (net_ratelimit())
  8982. netdev_err(ndev, "Rx DMA memory map failed\n");
  8983. - return -ENOMEM;
  8984. + goto err_alloc;
  8985. }
  8986. - bdp->cbd_sc = BD_ENET_RX_EMPTY;
  8987. - if (fep->bufdesc_ex) {
  8988. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  8989. - ebdp->cbd_esc = BD_ENET_RX_INT;
  8990. - }
  8991. + fep->rx_skbuff[i] = skb;
  8992. + bdp = fec_enet_rx_get(i, fep);
  8993. + bdp->bd.cbd_bufaddr = addr;
  8994. + bdp->bd.cbd_sc = BD_ENET_RX_EMPTY;
  8995. + /* Set the last buffer to wrap. */
  8996. + if (i == fep->rx_ring_size - 1)
  8997. + bdp->bd.cbd_sc |= BD_SC_WRAP;
  8998. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8999. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  9000. + bdp->ebd.cbd_esc = BD_ENET_RX_INT;
  9001. }
  9002. - /* Set the last buffer to wrap. */
  9003. - bdp = fec_enet_get_prevdesc(bdp, fep);
  9004. - bdp->cbd_sc |= BD_SC_WRAP;
  9005. -
  9006. - bdp = fep->tx_bd_base;
  9007. for (i = 0; i < fep->tx_ring_size; i++) {
  9008. + bdp = fec_enet_tx_get(i, fep);
  9009. fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
  9010. + if (!fep->tx_bounce[i])
  9011. + goto err_alloc;
  9012. - bdp->cbd_sc = 0;
  9013. - bdp->cbd_bufaddr = 0;
  9014. -
  9015. - if (fep->bufdesc_ex) {
  9016. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  9017. - ebdp->cbd_esc = BD_ENET_TX_INT;
  9018. - }
  9019. + /* Set the last buffer to wrap. */
  9020. + if (i == fep->tx_ring_size - 1)
  9021. + bdp->bd.cbd_sc = BD_SC_WRAP;
  9022. + else
  9023. + bdp->bd.cbd_sc = 0;
  9024. + bdp->bd.cbd_bufaddr = 0;
  9025. - bdp = fec_enet_get_nextdesc(bdp, fep);
  9026. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  9027. + bdp->ebd.cbd_esc = BD_ENET_TX_INT;
  9028. }
  9029. - /* Set the last buffer to wrap. */
  9030. - bdp = fec_enet_get_prevdesc(bdp, fep);
  9031. - bdp->cbd_sc |= BD_SC_WRAP;
  9032. -
  9033. return 0;
  9034. +
  9035. + err_alloc:
  9036. + fec_enet_free_buffers(ndev);
  9037. + return -ENOMEM;
  9038. }
  9039. static int
  9040. @@ -1788,10 +2168,12 @@
  9041. return ret;
  9042. }
  9043. + mutex_lock(&fep->mutex);
  9044. + fec_restart(ndev);
  9045. + mutex_unlock(&fep->mutex);
  9046. napi_enable(&fep->napi);
  9047. phy_start(fep->phy_dev);
  9048. netif_start_queue(ndev);
  9049. - fep->opened = 1;
  9050. return 0;
  9051. }
  9052. @@ -1800,17 +2182,19 @@
  9053. {
  9054. struct fec_enet_private *fep = netdev_priv(ndev);
  9055. - /* Don't know what to do yet. */
  9056. - napi_disable(&fep->napi);
  9057. - fep->opened = 0;
  9058. - netif_stop_queue(ndev);
  9059. - fec_stop(ndev);
  9060. + phy_stop(fep->phy_dev);
  9061. - if (fep->phy_dev) {
  9062. - phy_stop(fep->phy_dev);
  9063. - phy_disconnect(fep->phy_dev);
  9064. + if (netif_device_present(ndev)) {
  9065. + napi_disable(&fep->napi);
  9066. + netif_tx_disable(ndev);
  9067. + mutex_lock(&fep->mutex);
  9068. + fec_stop(ndev);
  9069. + mutex_unlock(&fep->mutex);
  9070. }
  9071. + phy_disconnect(fep->phy_dev);
  9072. + fep->phy_dev = NULL;
  9073. +
  9074. fec_enet_free_buffers(ndev);
  9075. return 0;
  9076. @@ -1935,28 +2319,67 @@
  9077. }
  9078. #endif
  9079. +static netdev_features_t fec_fix_features(struct net_device *ndev,
  9080. + netdev_features_t features)
  9081. +{
  9082. + struct fec_enet_private *fep = netdev_priv(ndev);
  9083. +
  9084. + /*
  9085. + * NETIF_F_SG requires a minimum transmit ring size. If we
  9086. + * have less than this size, we can't support this feature.
  9087. + */
  9088. + if (fep->tx_ring_size < TX_RING_SIZE_MIN_SG)
  9089. + features &= ~NETIF_F_SG;
  9090. +
  9091. + return features;
  9092. +}
  9093. +
  9094. +#define FEATURES_NEED_QUIESCE (NETIF_F_RXCSUM | NETIF_F_SG)
  9095. +
  9096. static int fec_set_features(struct net_device *netdev,
  9097. netdev_features_t features)
  9098. {
  9099. struct fec_enet_private *fep = netdev_priv(netdev);
  9100. netdev_features_t changed = features ^ netdev->features;
  9101. + /* Quiesce the device if necessary */
  9102. + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
  9103. + mutex_lock(&fep->mutex);
  9104. + napi_disable(&fep->napi);
  9105. + netif_tx_lock_bh(netdev);
  9106. + fec_stop(netdev);
  9107. + }
  9108. +
  9109. netdev->features = features;
  9110. /* Receive checksum has been changed */
  9111. if (changed & NETIF_F_RXCSUM) {
  9112. if (features & NETIF_F_RXCSUM)
  9113. - fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
  9114. + fep->flags |= FEC_FLAG_RX_CSUM;
  9115. else
  9116. - fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
  9117. + fep->flags &= ~FEC_FLAG_RX_CSUM;
  9118. + }
  9119. - if (netif_running(netdev)) {
  9120. - fec_stop(netdev);
  9121. - fec_restart(netdev, fep->phy_dev->duplex);
  9122. - netif_wake_queue(netdev);
  9123. - } else {
  9124. - fec_restart(netdev, fep->phy_dev->duplex);
  9125. - }
  9126. + if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
  9127. + if (features & NETIF_F_HW_VLAN_CTAG_RX)
  9128. + fep->flags |= FEC_FLAG_RX_VLAN;
  9129. + else
  9130. + fep->flags &= ~FEC_FLAG_RX_VLAN;
  9131. + }
  9132. +
  9133. + /* Set the appropriate minimum transmit ring free threshold */
  9134. + if (features & NETIF_F_SG)
  9135. + fep->tx_min = MAX_SKB_FRAGS + 1;
  9136. + else
  9137. + fep->tx_min = 1;
  9138. +
  9139. + /* Resume the device after updates */
  9140. + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
  9141. + fec_restart(netdev);
  9142. + netif_wake_queue(netdev);
  9143. + netif_tx_unlock_bh(netdev);
  9144. + napi_enable(&fep->napi);
  9145. + mutex_unlock(&fep->mutex);
  9146. }
  9147. return 0;
  9148. @@ -1975,27 +2398,13 @@
  9149. #ifdef CONFIG_NET_POLL_CONTROLLER
  9150. .ndo_poll_controller = fec_poll_controller,
  9151. #endif
  9152. + .ndo_fix_features = fec_fix_features,
  9153. .ndo_set_features = fec_set_features,
  9154. };
  9155. - /*
  9156. - * XXX: We need to clean up on failure exits here.
  9157. - *
  9158. - */
  9159. -static int fec_enet_init(struct net_device *ndev)
  9160. +static void fec_enet_init(struct net_device *ndev)
  9161. {
  9162. struct fec_enet_private *fep = netdev_priv(ndev);
  9163. - const struct platform_device_id *id_entry =
  9164. - platform_get_device_id(fep->pdev);
  9165. - struct bufdesc *cbd_base;
  9166. -
  9167. - /* Allocate memory for buffer descriptors. */
  9168. - cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
  9169. - GFP_KERNEL);
  9170. - if (!cbd_base)
  9171. - return -ENOMEM;
  9172. -
  9173. - memset(cbd_base, 0, PAGE_SIZE);
  9174. fep->netdev = ndev;
  9175. @@ -2008,13 +2417,8 @@
  9176. fep->tx_ring_size = TX_RING_SIZE;
  9177. fep->rx_ring_size = RX_RING_SIZE;
  9178. - /* Set receive and transmit descriptor base. */
  9179. - fep->rx_bd_base = cbd_base;
  9180. - if (fep->bufdesc_ex)
  9181. - fep->tx_bd_base = (struct bufdesc *)
  9182. - (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
  9183. - else
  9184. - fep->tx_bd_base = cbd_base + fep->rx_ring_size;
  9185. + fep->rx_bd_base = fep->tx_bd_base = NULL;
  9186. + fep->rx_bd_dma = fep->tx_bd_dma = 0;
  9187. /* The FEC Ethernet specific entries in the device structure */
  9188. ndev->watchdog_timeo = TX_TIMEOUT;
  9189. @@ -2024,24 +2428,37 @@
  9190. writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
  9191. netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
  9192. - if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
  9193. - /* enable hw VLAN support */
  9194. - ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
  9195. - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
  9196. - }
  9197. -
  9198. - if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
  9199. - /* enable hw accelerator */
  9200. - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9201. - | NETIF_F_RXCSUM);
  9202. - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9203. - | NETIF_F_RXCSUM);
  9204. - fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
  9205. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  9206. + /* Features which require the enhanced buffer descriptors */
  9207. + if (fep->quirks & FEC_QUIRK_HAS_VLAN) {
  9208. + /* enable hw VLAN support */
  9209. + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
  9210. + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
  9211. + fep->flags |= FEC_FLAG_RX_VLAN;
  9212. + }
  9213. +
  9214. + if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
  9215. + /* enable hw accelerator */
  9216. + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9217. + | NETIF_F_RXCSUM);
  9218. + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9219. + | NETIF_F_RXCSUM);
  9220. + fep->flags |= FEC_FLAG_RX_CSUM;
  9221. + }
  9222. }
  9223. - fec_restart(ndev, 0);
  9224. + if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) {
  9225. + /* don't enable SG if we need to swap frames */
  9226. + ndev->features |= NETIF_F_SG;
  9227. + ndev->hw_features |= NETIF_F_SG;
  9228. + }
  9229. - return 0;
  9230. + if (ndev->features & NETIF_F_SG)
  9231. + fep->tx_min = MAX_SKB_FRAGS + 1;
  9232. + else
  9233. + fep->tx_min = 1;
  9234. +
  9235. + fec_restart(ndev);
  9236. }
  9237. #ifdef CONFIG_OF
  9238. @@ -2107,11 +2524,16 @@
  9239. /* setup board info structure */
  9240. fep = netdev_priv(ndev);
  9241. + mutex_init(&fep->mutex);
  9242. +
  9243. + if (pdev->id_entry)
  9244. + fep->quirks = pdev->id_entry->driver_data;
  9245. #if !defined(CONFIG_M5272)
  9246. /* default enable pause frame auto negotiation */
  9247. - if (pdev->id_entry &&
  9248. - (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
  9249. - fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
  9250. + if (fep->quirks & FEC_QUIRK_HAS_GBIT)
  9251. + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG |
  9252. + FEC_PAUSE_FLAG_TX |
  9253. + FEC_PAUSE_FLAG_RX;
  9254. #endif
  9255. r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  9256. @@ -2124,7 +2546,9 @@
  9257. fep->pdev = pdev;
  9258. fep->dev_id = dev_id++;
  9259. - fep->bufdesc_ex = 0;
  9260. + fep->flags = 0;
  9261. + if (pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX)
  9262. + fep->flags |= FEC_FLAG_BUFDESC_EX;
  9263. platform_set_drvdata(pdev, ndev);
  9264. @@ -2157,11 +2581,9 @@
  9265. fep->clk_enet_out = NULL;
  9266. fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
  9267. - fep->bufdesc_ex =
  9268. - pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
  9269. if (IS_ERR(fep->clk_ptp)) {
  9270. fep->clk_ptp = NULL;
  9271. - fep->bufdesc_ex = 0;
  9272. + fep->flags &= ~FEC_FLAG_BUFDESC_EX;
  9273. }
  9274. ret = clk_prepare_enable(fep->clk_ahb);
  9275. @@ -2198,12 +2620,10 @@
  9276. fec_reset_phy(pdev);
  9277. - if (fep->bufdesc_ex)
  9278. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  9279. fec_ptp_init(pdev);
  9280. - ret = fec_enet_init(ndev);
  9281. - if (ret)
  9282. - goto failed_init;
  9283. + fec_enet_init(ndev);
  9284. for (i = 0; i < FEC_IRQ_NUM; i++) {
  9285. irq = platform_get_irq(pdev, i);
  9286. @@ -2230,17 +2650,16 @@
  9287. if (ret)
  9288. goto failed_register;
  9289. - if (fep->bufdesc_ex && fep->ptp_clock)
  9290. + if (fep->flags & FEC_FLAG_BUFDESC_EX && fep->ptp_clock)
  9291. netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
  9292. - INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
  9293. + INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
  9294. return 0;
  9295. failed_register:
  9296. fec_enet_mii_remove(fep);
  9297. failed_mii_init:
  9298. failed_irq:
  9299. -failed_init:
  9300. if (fep->reg_phy)
  9301. regulator_disable(fep->reg_phy);
  9302. failed_regulator:
  9303. @@ -2266,7 +2685,7 @@
  9304. struct net_device *ndev = platform_get_drvdata(pdev);
  9305. struct fec_enet_private *fep = netdev_priv(ndev);
  9306. - cancel_delayed_work_sync(&(fep->delay_work.delay_work));
  9307. + cancel_work_sync(&fep->tx_timeout_work);
  9308. unregister_netdev(ndev);
  9309. fec_enet_mii_remove(fep);
  9310. del_timer_sync(&fep->time_keep);
  9311. @@ -2292,10 +2711,19 @@
  9312. struct net_device *ndev = dev_get_drvdata(dev);
  9313. struct fec_enet_private *fep = netdev_priv(ndev);
  9314. + rtnl_lock();
  9315. if (netif_running(ndev)) {
  9316. - fec_stop(ndev);
  9317. + phy_stop(fep->phy_dev);
  9318. + napi_disable(&fep->napi);
  9319. + netif_tx_lock_bh(ndev);
  9320. netif_device_detach(ndev);
  9321. + netif_tx_unlock_bh(ndev);
  9322. + mutex_lock(&fep->mutex);
  9323. + fec_stop(ndev);
  9324. + mutex_unlock(&fep->mutex);
  9325. }
  9326. + rtnl_unlock();
  9327. +
  9328. if (fep->clk_ptp)
  9329. clk_disable_unprepare(fep->clk_ptp);
  9330. if (fep->clk_enet_out)
  9331. @@ -2342,10 +2770,18 @@
  9332. goto failed_clk_ptp;
  9333. }
  9334. + rtnl_lock();
  9335. if (netif_running(ndev)) {
  9336. - fec_restart(ndev, fep->full_duplex);
  9337. + mutex_lock(&fep->mutex);
  9338. + fec_restart(ndev);
  9339. + mutex_unlock(&fep->mutex);
  9340. + netif_tx_lock_bh(ndev);
  9341. netif_device_attach(ndev);
  9342. + netif_tx_unlock_bh(ndev);
  9343. + napi_enable(&fep->napi);
  9344. + phy_start(fep->phy_dev);
  9345. }
  9346. + rtnl_unlock();
  9347. return 0;
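
The fec_enet_phy_config() hunk above maps the driver's rx/tx pause capabilities onto the symmetric/asymmetric advertisement bits. Below is a minimal standalone sketch of that mapping for reference only; it is not part of the patch, and it assumes the uapi <linux/ethtool.h> definitions of ADVERTISED_Pause and ADVERTISED_Asym_Pause are available to userspace.

#include <stdio.h>
#include <linux/ethtool.h>	/* ADVERTISED_Pause, ADVERTISED_Asym_Pause */

/*
 * Same mapping as fec_enet_phy_config(): a receive capability advertises
 * both Pause and Asym_Pause, a transmit capability advertises Asym_Pause
 * only.  rx-only and rx+tx therefore produce the same advertisement; the
 * actual pause direction is resolved against the link partner's bits
 * during autonegotiation.
 */
static unsigned int pause_to_advertisement(int want_rx, int want_tx)
{
	unsigned int adv = 0;

	if (want_rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (want_tx)
		adv |= ADVERTISED_Asym_Pause;
	return adv;
}

int main(void)
{
	printf("rx only: %#x\n", pause_to_advertisement(1, 0));
	printf("tx only: %#x\n", pause_to_advertisement(0, 1));
	printf("rx + tx: %#x\n", pause_to_advertisement(1, 1));
	return 0;
}
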
  9348. diff -Nur linux-3.15-rc1.orig/drivers/regulator/anatop-regulator.c linux-3.15-rc1/drivers/regulator/anatop-regulator.c
  9349. --- linux-3.15-rc1.orig/drivers/regulator/anatop-regulator.c 2014-04-13 23:18:35.000000000 +0200
  9350. +++ linux-3.15-rc1/drivers/regulator/anatop-regulator.c 2014-04-25 14:11:13.583375360 +0200
  9351. @@ -267,6 +267,7 @@
  9352. config.driver_data = sreg;
  9353. config.of_node = pdev->dev.of_node;
  9354. config.regmap = sreg->anatop;
  9355. + config.ena_gpio = -EINVAL;
  9356. /* Only core regulators have the ramp up delay configuration. */
  9357. if (sreg->control_reg && sreg->delay_bit_width) {
  9358. diff -Nur linux-3.15-rc1.orig/drivers/regulator/core.c linux-3.15-rc1/drivers/regulator/core.c
  9359. --- linux-3.15-rc1.orig/drivers/regulator/core.c 2014-04-13 23:18:35.000000000 +0200
  9360. +++ linux-3.15-rc1/drivers/regulator/core.c 2014-04-25 14:11:13.583375360 +0200
  9361. @@ -3459,7 +3459,7 @@
  9362. dev_set_drvdata(&rdev->dev, rdev);
  9363. - if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
  9364. + if (gpio_is_valid(config->ena_gpio)) {
  9365. ret = regulator_ena_gpio_request(rdev, config);
  9366. if (ret != 0) {
  9367. rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
  9368. diff -Nur linux-3.15-rc1.orig/drivers/regulator/dummy.c linux-3.15-rc1/drivers/regulator/dummy.c
  9369. --- linux-3.15-rc1.orig/drivers/regulator/dummy.c 2014-04-13 23:18:35.000000000 +0200
  9370. +++ linux-3.15-rc1/drivers/regulator/dummy.c 2014-04-25 14:11:13.583375360 +0200
  9371. @@ -48,6 +48,7 @@
  9372. config.dev = &pdev->dev;
  9373. config.init_data = &dummy_initdata;
  9374. + config.ena_gpio = -EINVAL;
  9375. dummy_regulator_rdev = regulator_register(&dummy_desc, &config);
  9376. if (IS_ERR(dummy_regulator_rdev)) {
  9377. diff -Nur linux-3.15-rc1.orig/drivers/regulator/fixed.c linux-3.15-rc1/drivers/regulator/fixed.c
  9378. --- linux-3.15-rc1.orig/drivers/regulator/fixed.c 2014-04-13 23:18:35.000000000 +0200
  9379. +++ linux-3.15-rc1/drivers/regulator/fixed.c 2014-04-25 14:11:13.583375360 +0200
  9380. @@ -161,9 +161,7 @@
  9381. drvdata->desc.n_voltages = 1;
  9382. drvdata->desc.fixed_uV = config->microvolts;
  9383. -
  9384. - if (config->gpio >= 0)
  9385. - cfg.ena_gpio = config->gpio;
  9386. + cfg.ena_gpio = config->gpio;
  9387. cfg.ena_gpio_invert = !config->enable_high;
  9388. if (config->enabled_at_boot) {
  9389. if (config->enable_high)
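
The regulator hunks above stop treating ena_gpio == 0 as "no enable GPIO" and instead require an explicitly invalid number (-EINVAL), since GPIO 0 is a perfectly valid GPIO. The standalone illustration below shows why the sentinel works; it is not kernel code, and the ARCH_NR_GPIOS value is an assumed example (the real limit is per-architecture configuration).

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define ARCH_NR_GPIOS 512	/* assumed value for illustration only */

/* Essentially the check performed by the kernel's gpio_is_valid(). */
static bool gpio_is_valid(int number)
{
	return number >= 0 && number < ARCH_NR_GPIOS;
}

int main(void)
{
	printf("gpio 0       -> %d\n", gpio_is_valid(0));	/* 1: a real GPIO */
	printf("gpio -EINVAL -> %d\n", gpio_is_valid(-EINVAL));	/* 0: "no GPIO" */
	return 0;
}
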
  9390. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/drm-ddc-connector.c linux-3.15-rc1/drivers/staging/imx-drm/drm-ddc-connector.c
  9391. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/drm-ddc-connector.c 1970-01-01 01:00:00.000000000 +0100
  9392. +++ linux-3.15-rc1/drivers/staging/imx-drm/drm-ddc-connector.c 2014-04-25 14:11:13.587375378 +0200
  9393. @@ -0,0 +1,92 @@
  9394. +#include <linux/i2c.h>
  9395. +#include <linux/module.h>
  9396. +#include <drm/drmP.h>
  9397. +#include <drm/drm_crtc_helper.h>
  9398. +#include <drm/drm_edid.h>
  9399. +
  9400. +#include "drm-ddc-connector.h"
  9401. +
  9402. +static enum drm_connector_status
  9403. +drm_ddc_connector_detect(struct drm_connector *connector, bool force)
  9404. +{
  9405. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9406. +
  9407. + return ddc_conn->detect ? ddc_conn->detect(connector, force) :
  9408. + connector_status_connected;
  9409. +}
  9410. +
  9411. +int drm_ddc_connector_get_modes(struct drm_connector *connector)
  9412. +{
  9413. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9414. + struct edid *edid;
  9415. + int ret = 0;
  9416. +
  9417. + if (!ddc_conn->ddc)
  9418. + return 0;
  9419. +
  9420. + edid = drm_get_edid(connector, ddc_conn->ddc);
  9421. + if (edid) {
  9422. + drm_mode_connector_update_edid_property(connector, edid);
  9423. + ret = drm_add_edid_modes(connector, edid);
  9424. + /* Store the ELD */
  9425. + drm_edid_to_eld(connector, edid);
  9426. + kfree(edid);
  9427. + }
  9428. +
  9429. + return ret;
  9430. +}
  9431. +EXPORT_SYMBOL_GPL(drm_ddc_connector_get_modes);
  9432. +
  9433. +static void drm_ddc_connector_destroy(struct drm_connector *connector)
  9434. +{
  9435. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9436. +
  9437. + drm_sysfs_connector_remove(connector);
  9438. + drm_connector_cleanup(connector);
  9439. + if (ddc_conn->ddc)
  9440. + i2c_put_adapter(ddc_conn->ddc);
  9441. +}
  9442. +
  9443. +static const struct drm_connector_funcs drm_ddc_connector_funcs = {
  9444. + .dpms = drm_helper_connector_dpms,
  9445. + .fill_modes = drm_helper_probe_single_connector_modes,
  9446. + .detect = drm_ddc_connector_detect,
  9447. + .destroy = drm_ddc_connector_destroy,
  9448. +};
  9449. +
  9450. +int drm_ddc_connector_add(struct drm_device *drm,
  9451. + struct drm_ddc_connector *ddc_conn, int connector_type)
  9452. +{
  9453. + drm_connector_init(drm, &ddc_conn->connector, &drm_ddc_connector_funcs,
  9454. + connector_type);
  9455. + return 0;
  9456. +}
  9457. +EXPORT_SYMBOL_GPL(drm_ddc_connector_add);
  9458. +
  9459. +struct drm_ddc_connector *drm_ddc_connector_create(struct drm_device *drm,
  9460. + struct device_node *np, void *private)
  9461. +{
  9462. + struct drm_ddc_connector *ddc_conn;
  9463. + struct device_node *ddc_node;
  9464. +
  9465. + ddc_conn = devm_kzalloc(drm->dev, sizeof(*ddc_conn), GFP_KERNEL);
  9466. + if (!ddc_conn)
  9467. + return ERR_PTR(-ENOMEM);
  9468. +
  9469. + ddc_conn->private = private;
  9470. +
  9471. + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
  9472. + if (ddc_node) {
  9473. + ddc_conn->ddc = of_find_i2c_adapter_by_node(ddc_node);
  9474. + of_node_put(ddc_node);
  9475. + if (!ddc_conn->ddc)
  9476. + return ERR_PTR(-EPROBE_DEFER);
  9477. + }
  9478. +
  9479. + return ddc_conn;
  9480. +}
  9481. +EXPORT_SYMBOL_GPL(drm_ddc_connector_create);
  9482. +
  9483. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  9484. +MODULE_DESCRIPTION("Generic DRM DDC connector module");
  9485. +MODULE_LICENSE("GPL v2");
  9486. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/drm-ddc-connector.h linux-3.15-rc1/drivers/staging/imx-drm/drm-ddc-connector.h
  9487. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/drm-ddc-connector.h 1970-01-01 01:00:00.000000000 +0100
  9488. +++ linux-3.15-rc1/drivers/staging/imx-drm/drm-ddc-connector.h 2014-04-25 14:11:13.587375378 +0200
  9489. @@ -0,0 +1,26 @@
  9490. +#ifndef DRM_DDC_CONNECTOR_H
  9491. +#define DRM_DDC_CONNECTOR_H
  9492. +
  9493. +struct drm_ddc_connector {
  9494. + struct i2c_adapter *ddc;
  9495. + struct drm_connector connector;
  9496. + enum drm_connector_status (*detect)(struct drm_connector *, bool);
  9497. + void *private;
  9498. +};
  9499. +
  9500. +#define to_ddc_conn(c) container_of(c, struct drm_ddc_connector, connector)
  9501. +
  9502. +int drm_ddc_connector_get_modes(struct drm_connector *connector);
  9503. +int drm_ddc_connector_add(struct drm_device *drm,
  9504. + struct drm_ddc_connector *ddc_conn, int connector_type);
  9505. +struct drm_ddc_connector *drm_ddc_connector_create(struct drm_device *drm,
  9506. + struct device_node *np, void *private);
  9507. +
  9508. +static inline void *drm_ddc_private(struct drm_connector *connector)
  9509. +{
  9510. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9511. +
  9512. + return ddc_conn->private;
  9513. +}
  9514. +
  9515. +#endif
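
A hypothetical caller of the drm-ddc-connector helper added above might look like the sketch below. This is illustrative only and not part of the patch: struct my_hdmi, my_hdmi_cable_present() and the choice of DRM_MODE_CONNECTOR_HDMIA are assumptions standing in for a real encoder driver.

#include <linux/err.h>
#include <drm/drmP.h>
#include "drm-ddc-connector.h"

/* Assumed to exist in the calling driver. */
struct my_hdmi;
bool my_hdmi_cable_present(struct my_hdmi *hdmi);

static enum drm_connector_status
my_hdmi_detect(struct drm_connector *connector, bool force)
{
	/* drm_ddc_private() returns the pointer passed to ..._create() */
	struct my_hdmi *hdmi = drm_ddc_private(connector);

	return my_hdmi_cable_present(hdmi) ? connector_status_connected :
					     connector_status_disconnected;
}

static int my_hdmi_register_connector(struct drm_device *drm,
				      struct device_node *np,
				      struct my_hdmi *hdmi)
{
	struct drm_ddc_connector *ddc_conn;

	ddc_conn = drm_ddc_connector_create(drm, np, hdmi);
	if (IS_ERR(ddc_conn))
		return PTR_ERR(ddc_conn);	/* includes -EPROBE_DEFER */

	ddc_conn->detect = my_hdmi_detect;
	return drm_ddc_connector_add(drm, ddc_conn, DRM_MODE_CONNECTOR_HDMIA);
}
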
  9516. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-audio.c linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-audio.c
  9517. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-audio.c 1970-01-01 01:00:00.000000000 +0100
  9518. +++ linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-audio.c 2014-04-25 14:11:13.587375378 +0200
  9519. @@ -0,0 +1,654 @@
  9520. +/*
  9521. + * DesignWare HDMI audio driver
  9522. + *
  9523. + * This program is free software; you can redistribute it and/or modify
  9524. + * it under the terms of the GNU General Public License version 2 as
  9525. + * published by the Free Software Foundation.
  9526. + *
  9527. + * Written and tested against the (alleged) DW HDMI Tx found in iMX6S.
  9528. + */
  9529. +#include <linux/delay.h>
  9530. +#include <linux/io.h>
  9531. +#include <linux/interrupt.h>
  9532. +#include <linux/module.h>
  9533. +#include <linux/platform_device.h>
  9534. +
  9535. +#include <sound/asoundef.h>
  9536. +#include <sound/core.h>
  9537. +#include <sound/initval.h>
  9538. +#include <sound/pcm.h>
  9539. +
  9540. +#include "dw-hdmi-audio.h"
  9541. +
  9542. +#define DRIVER_NAME "dw-hdmi-audio"
  9543. +
  9544. +/* Provide some bits rather than bit offsets */
  9545. +enum {
  9546. + HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7),
  9547. + HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3),
  9548. + HDMI_AHB_DMA_START_START = BIT(0),
  9549. + HDMI_AHB_DMA_STOP_STOP = BIT(0),
  9550. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5),
  9551. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4),
  9552. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3),
  9553. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2),
  9554. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
  9555. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
  9556. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL =
  9557. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR |
  9558. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST |
  9559. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY |
  9560. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE |
  9561. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL |
  9562. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY,
  9563. + HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5),
  9564. + HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4),
  9565. + HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3),
  9566. + HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2),
  9567. + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
  9568. + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
  9569. + HDMI_IH_AHBDMAAUD_STAT0_ALL =
  9570. + HDMI_IH_AHBDMAAUD_STAT0_ERROR |
  9571. + HDMI_IH_AHBDMAAUD_STAT0_LOST |
  9572. + HDMI_IH_AHBDMAAUD_STAT0_RETRY |
  9573. + HDMI_IH_AHBDMAAUD_STAT0_DONE |
  9574. + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL |
  9575. + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY,
  9576. + HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1,
  9577. + HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1,
  9578. + HDMI_AHB_DMA_CONF0_INCR4 = 0,
  9579. + HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0),
  9580. + HDMI_AHB_DMA_MASK_DONE = BIT(7),
  9581. + HDMI_REVISION_ID = 0x0001,
  9582. + HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
  9583. + HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
  9584. + HDMI_AUD_N1 = 0x3200,
  9585. + HDMI_AUD_CTS1 = 0x3203,
  9586. + HDMI_AHB_DMA_CONF0 = 0x3600,
  9587. + HDMI_AHB_DMA_START = 0x3601,
  9588. + HDMI_AHB_DMA_STOP = 0x3602,
  9589. + HDMI_AHB_DMA_THRSLD = 0x3603,
  9590. + HDMI_AHB_DMA_STRADDR0 = 0x3604,
  9591. + HDMI_AHB_DMA_STPADDR0 = 0x3608,
  9592. + HDMI_AHB_DMA_STAT = 0x3612,
  9593. + HDMI_AHB_DMA_STAT_FULL = BIT(1),
  9594. + HDMI_AHB_DMA_MASK = 0x3614,
  9595. + HDMI_AHB_DMA_POL = 0x3615,
  9596. + HDMI_AHB_DMA_CONF1 = 0x3616,
  9597. + HDMI_AHB_DMA_BUFFPOL = 0x361a,
  9598. +};
  9599. +
  9600. +struct snd_dw_hdmi {
  9601. + struct snd_card *card;
  9602. + struct snd_pcm *pcm;
  9603. + struct dw_hdmi_audio_data data;
  9604. + struct snd_pcm_substream *substream;
  9605. + void (*reformat)(struct snd_dw_hdmi *, size_t, size_t);
  9606. + void *buf_src;
  9607. + void *buf_dst;
  9608. + dma_addr_t buf_addr;
  9609. + unsigned buf_offset;
  9610. + unsigned buf_period;
  9611. + unsigned buf_size;
  9612. + unsigned channels;
  9613. + uint8_t revision;
  9614. + uint8_t iec_offset;
  9615. + uint8_t cs[192][8];
  9616. +};
  9617. +
  9618. +static void dw_hdmi_writel(unsigned long val, void __iomem *ptr)
  9619. +{
  9620. + writeb_relaxed(val, ptr);
  9621. + writeb_relaxed(val >> 8, ptr + 1);
  9622. + writeb_relaxed(val >> 16, ptr + 2);
  9623. + writeb_relaxed(val >> 24, ptr + 3);
  9624. +}
  9625. +
  9626. +/*
  9627. + * Convert to hardware format: The userspace buffer contains IEC958 samples,
  9628. + * with the PCUV bits in bits 31..28 and audio samples in bits 27..4. We
  9629. + * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio
  9630. + * samples in 23..0.
  9631. + *
9632. + * Default preamble in bits 3..0: 8 = block start, 4 = even, 2 = odd
  9633. + *
  9634. + * Ideally, we could do with having the data properly formatted in userspace.
  9635. + */
  9636. +static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw,
  9637. + size_t offset, size_t bytes)
  9638. +{
  9639. + uint32_t *src = dw->buf_src + offset;
  9640. + uint32_t *dst = dw->buf_dst + offset;
  9641. + uint32_t *end = dw->buf_src + offset + bytes;
  9642. +
  9643. + do {
  9644. + uint32_t b, sample = *src++;
  9645. +
  9646. + b = (sample & 8) << (28 - 3);
  9647. +
  9648. + sample >>= 4;
  9649. +
  9650. + *dst++ = sample | b;
  9651. + } while (src < end);
  9652. +}
  9653. +
  9654. +static uint32_t parity(uint32_t sample)
  9655. +{
  9656. + sample ^= sample >> 16;
  9657. + sample ^= sample >> 8;
  9658. + sample ^= sample >> 4;
  9659. + sample ^= sample >> 2;
  9660. + sample ^= sample >> 1;
  9661. + return (sample & 1) << 27;
  9662. +}
  9663. +
  9664. +static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw,
  9665. + size_t offset, size_t bytes)
  9666. +{
  9667. + uint32_t *src = dw->buf_src + offset;
  9668. + uint32_t *dst = dw->buf_dst + offset;
  9669. + uint32_t *end = dw->buf_src + offset + bytes;
  9670. +
  9671. + do {
  9672. + unsigned i;
  9673. + uint8_t *cs;
  9674. +
  9675. + cs = dw->cs[dw->iec_offset++];
  9676. + if (dw->iec_offset >= 192)
  9677. + dw->iec_offset = 0;
  9678. +
  9679. + i = dw->channels;
  9680. + do {
  9681. + uint32_t sample = *src++;
  9682. +
  9683. + sample &= ~0xff000000;
  9684. + sample |= *cs++ << 24;
  9685. + sample |= parity(sample & ~0xf8000000);
  9686. +
  9687. + *dst++ = sample;
  9688. + } while (--i);
  9689. + } while (src < end);
  9690. +}
  9691. +
  9692. +static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw,
  9693. + struct snd_pcm_runtime *runtime)
  9694. +{
  9695. + uint8_t cs[4];
  9696. + unsigned ch, i, j;
  9697. +
  9698. + cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
  9699. + cs[1] = IEC958_AES1_CON_GENERAL;
  9700. + cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC;
  9701. + cs[3] = IEC958_AES3_CON_CLOCK_1000PPM;
  9702. +
  9703. + switch (runtime->rate) {
  9704. + case 32000:
  9705. + cs[3] |= IEC958_AES3_CON_FS_32000;
  9706. + break;
  9707. + case 44100:
  9708. + cs[3] |= IEC958_AES3_CON_FS_44100;
  9709. + break;
  9710. + case 48000:
  9711. + cs[3] |= IEC958_AES3_CON_FS_48000;
  9712. + break;
  9713. + case 88200:
  9714. + cs[3] |= IEC958_AES3_CON_FS_88200;
  9715. + break;
  9716. + case 96000:
  9717. + cs[3] |= IEC958_AES3_CON_FS_96000;
  9718. + break;
  9719. + case 176400:
  9720. + cs[3] |= IEC958_AES3_CON_FS_176400;
  9721. + break;
  9722. + case 192000:
  9723. + cs[3] |= IEC958_AES3_CON_FS_192000;
  9724. + break;
  9725. + }
  9726. +
  9727. + memset(dw->cs, 0, sizeof(dw->cs));
  9728. +
  9729. + for (ch = 0; ch < 8; ch++) {
  9730. + cs[2] &= ~IEC958_AES2_CON_CHANNEL;
  9731. + cs[2] |= (ch + 1) << 4;
  9732. +
  9733. + for (i = 0; i < ARRAY_SIZE(cs); i++) {
  9734. + unsigned c = cs[i];
  9735. +
  9736. + for (j = 0; j < 8; j++, c >>= 1)
  9737. + dw->cs[i * 8 + j][ch] = (c & 1) << 2;
  9738. + }
  9739. + }
  9740. + dw->cs[0][0] |= BIT(4);
  9741. +}
  9742. +
  9743. +static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw)
  9744. +{
  9745. + void __iomem *base = dw->data.base;
  9746. + unsigned offset = dw->buf_offset;
  9747. + unsigned period = dw->buf_period;
  9748. + u32 start, stop;
  9749. +
  9750. + dw->reformat(dw, offset, period);
  9751. +
  9752. + /* Clear all irqs before enabling irqs and starting DMA */
  9753. + writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL,
  9754. + base + HDMI_IH_AHBDMAAUD_STAT0);
  9755. +
  9756. + start = dw->buf_addr + offset;
  9757. + stop = start + period - 1;
  9758. +
  9759. + /* Setup the hardware start/stop addresses */
  9760. + dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0);
  9761. + dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0);
  9762. +
  9763. + writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK);
  9764. + writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START);
  9765. +
  9766. + offset += period;
  9767. + if (offset >= dw->buf_size)
  9768. + offset = 0;
  9769. + dw->buf_offset = offset;
  9770. +}
  9771. +
  9772. +static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw)
  9773. +{
  9774. + dw->substream = NULL;
  9775. +
  9776. + /* Disable interrupts before disabling DMA */
  9777. + writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK);
  9778. + writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP);
  9779. +}
  9780. +
  9781. +static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
  9782. +{
  9783. + struct snd_dw_hdmi *dw = data;
  9784. + struct snd_pcm_substream *substream;
  9785. + unsigned stat;
  9786. +
  9787. + stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
  9788. + if (!stat)
  9789. + return IRQ_NONE;
  9790. +
  9791. + writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
  9792. +
  9793. + substream = dw->substream;
  9794. + if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) {
  9795. + snd_pcm_period_elapsed(substream);
  9796. + if (dw->substream)
  9797. + dw_hdmi_start_dma(dw);
  9798. + }
  9799. +
  9800. + return IRQ_HANDLED;
  9801. +}
  9802. +
  9803. +static struct snd_pcm_hardware dw_hdmi_hw = {
  9804. + .info = SNDRV_PCM_INFO_INTERLEAVED |
  9805. + SNDRV_PCM_INFO_BLOCK_TRANSFER |
  9806. + SNDRV_PCM_INFO_MMAP |
  9807. + SNDRV_PCM_INFO_MMAP_VALID,
  9808. + .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE |
  9809. + SNDRV_PCM_FMTBIT_S24_LE,
  9810. + .rates = SNDRV_PCM_RATE_32000 |
  9811. + SNDRV_PCM_RATE_44100 |
  9812. + SNDRV_PCM_RATE_48000 |
  9813. + SNDRV_PCM_RATE_88200 |
  9814. + SNDRV_PCM_RATE_96000 |
  9815. + SNDRV_PCM_RATE_176400 |
  9816. + SNDRV_PCM_RATE_192000,
  9817. + .channels_min = 2,
  9818. + .channels_max = 8,
  9819. + .buffer_bytes_max = 64 * 1024,
  9820. + .period_bytes_min = 256,
  9821. + .period_bytes_max = 8192, /* ERR004323: must limit to 8k */
  9822. + .periods_min = 2,
  9823. + .periods_max = 16,
  9824. + .fifo_size = 0,
  9825. +};
  9826. +
  9827. +static unsigned rates_mask[] = {
  9828. + SNDRV_PCM_RATE_32000,
  9829. + SNDRV_PCM_RATE_44100,
  9830. + SNDRV_PCM_RATE_48000,
  9831. + SNDRV_PCM_RATE_88200,
  9832. + SNDRV_PCM_RATE_96000,
  9833. + SNDRV_PCM_RATE_176400,
  9834. + SNDRV_PCM_RATE_192000,
  9835. +};
  9836. +
  9837. +static void dw_hdmi_parse_eld(struct snd_dw_hdmi *dw,
  9838. + struct snd_pcm_runtime *runtime)
  9839. +{
  9840. + u8 *sad, *eld = dw->data.eld;
  9841. + unsigned eld_ver, mnl, sad_count, rates, rate_mask, i;
  9842. + unsigned max_channels;
  9843. +
  9844. + eld_ver = eld[0] >> 3;
  9845. + if (eld_ver != 2 && eld_ver != 31)
  9846. + return;
  9847. +
  9848. + mnl = eld[4] & 0x1f;
  9849. + if (mnl > 16)
  9850. + return;
  9851. +
  9852. + sad_count = eld[5] >> 4;
  9853. + sad = eld + 20 + mnl;
  9854. +
  9855. + /* Start from the basic audio settings */
  9856. + max_channels = 2;
  9857. + rates = 7;
  9858. + while (sad_count > 0) {
  9859. + switch (sad[0] & 0x78) {
  9860. + case 0x08: /* PCM */
  9861. + max_channels = max(max_channels, (sad[0] & 7) + 1u);
  9862. + rates |= sad[1];
  9863. + break;
  9864. + }
  9865. + sad += 3;
  9866. + sad_count -= 1;
  9867. + }
  9868. +
  9869. + for (rate_mask = i = 0; i < ARRAY_SIZE(rates_mask); i++)
  9870. + if (rates & 1 << i)
  9871. + rate_mask |= rates_mask[i];
  9872. +
  9873. + runtime->hw.rates &= rate_mask;
  9874. + runtime->hw.channels_max = min(runtime->hw.channels_max, max_channels);
  9875. +}
  9876. +
  9877. +static int dw_hdmi_open(struct snd_pcm_substream *substream)
  9878. +{
  9879. + struct snd_pcm_runtime *runtime = substream->runtime;
  9880. + struct snd_dw_hdmi *dw = substream->private_data;
  9881. + void __iomem *base = dw->data.base;
  9882. + int ret;
  9883. +
  9884. + /* Clear FIFO */
  9885. + writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST,
  9886. + base + HDMI_AHB_DMA_CONF0);
  9887. +
  9888. + /* Configure interrupt polarities */
  9889. + writeb_relaxed(~0, base + HDMI_AHB_DMA_POL);
  9890. + writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL);
  9891. +
  9892. + /* Keep interrupts masked, and clear any pending */
  9893. + writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK);
  9894. + writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0);
  9895. +
  9896. + ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED,
  9897. + "dw-hdmi-audio", dw);
  9898. + if (ret)
  9899. + return ret;
  9900. +
  9901. + /* Un-mute done interrupt */
  9902. + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL &
  9903. + ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE,
  9904. + base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
  9905. +
  9906. + runtime->hw = dw_hdmi_hw;
  9907. + dw_hdmi_parse_eld(dw, runtime);
  9908. + snd_pcm_limit_hw_rates(runtime);
  9909. +
  9910. + return 0;
  9911. +}
  9912. +
  9913. +static int dw_hdmi_close(struct snd_pcm_substream *substream)
  9914. +{
  9915. + struct snd_dw_hdmi *dw = substream->private_data;
  9916. +
  9917. + /* Mute all interrupts */
  9918. + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
  9919. + dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
  9920. +
  9921. + free_irq(dw->data.irq, dw);
  9922. +
  9923. + return 0;
  9924. +}
  9925. +
  9926. +static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
  9927. +{
  9928. + return snd_pcm_lib_free_vmalloc_buffer(substream);
  9929. +}
  9930. +
  9931. +static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
  9932. + struct snd_pcm_hw_params *params)
  9933. +{
  9934. + return snd_pcm_lib_alloc_vmalloc_buffer(substream,
  9935. + params_buffer_bytes(params));
  9936. +}
  9937. +
  9938. +static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
  9939. +{
  9940. + struct snd_pcm_runtime *runtime = substream->runtime;
  9941. + struct snd_dw_hdmi *dw = substream->private_data;
  9942. + uint8_t threshold, conf0, conf1;
  9943. +
  9944. + /* Setup as per 3.0.5 FSL 4.1.0 BSP */
  9945. + switch (dw->revision) {
  9946. + case 0x0a:
  9947. + conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
  9948. + HDMI_AHB_DMA_CONF0_INCR4;
  9949. + if (runtime->channels == 2)
  9950. + threshold = 126;
  9951. + else
  9952. + threshold = 124;
  9953. + break;
  9954. + case 0x1a:
  9955. + conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
  9956. + HDMI_AHB_DMA_CONF0_INCR8;
  9957. + threshold = 128;
  9958. + break;
  9959. + default:
  9960. + /* NOTREACHED */
  9961. + return -EINVAL;
  9962. + }
  9963. +
  9964. + dw->data.set_sample_rate(dw->data.hdmi, runtime->rate);
  9965. +
  9966. + /* Minimum number of bytes in the fifo. */
  9967. + runtime->hw.fifo_size = threshold * 32;
  9968. +
  9969. + conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK;
  9970. + conf1 = (1 << runtime->channels) - 1;
  9971. +
  9972. + writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
  9973. + writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
  9974. + writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
  9975. +
  9976. + switch (runtime->format) {
  9977. + case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
  9978. + dw->reformat = dw_hdmi_reformat_iec958;
  9979. + break;
  9980. + case SNDRV_PCM_FORMAT_S24_LE:
  9981. + dw_hdmi_create_cs(dw, runtime);
  9982. + dw->reformat = dw_hdmi_reformat_s24;
  9983. + break;
  9984. + }
  9985. + dw->iec_offset = 0;
  9986. + dw->channels = runtime->channels;
  9987. + dw->buf_src = runtime->dma_area;
  9988. + dw->buf_dst = substream->dma_buffer.area;
  9989. + dw->buf_addr = substream->dma_buffer.addr;
  9990. + dw->buf_period = snd_pcm_lib_period_bytes(substream);
  9991. + dw->buf_size = snd_pcm_lib_buffer_bytes(substream);
  9992. +
  9993. + return 0;
  9994. +}
  9995. +
  9996. +static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd)
  9997. +{
  9998. + struct snd_dw_hdmi *dw = substream->private_data;
  9999. + void __iomem *base = dw->data.base;
  10000. + unsigned n[3], cts[3];
  10001. + int ret = 0, i;
  10002. + bool err005174;
  10003. +
  10004. + switch (cmd) {
  10005. + case SNDRV_PCM_TRIGGER_START:
  10006. + err005174 = dw->revision == 0x0a;
  10007. + if (err005174) {
  10008. + for (i = 2; i >= 1; i--) {
  10009. + n[i] = readb_relaxed(base + HDMI_AUD_N1 + i);
  10010. + cts[i] = readb_relaxed(base + HDMI_AUD_CTS1 + i);
  10011. + writeb_relaxed(0, base + HDMI_AUD_N1 + i);
  10012. + writeb_relaxed(0, base + HDMI_AUD_CTS1 + i);
  10013. + }
  10014. + }
  10015. +
  10016. + dw->buf_offset = 0;
  10017. + dw->substream = substream;
  10018. + dw_hdmi_start_dma(dw);
  10019. +
  10020. + if (err005174) {
  10021. + for (i = 2; i >= 1; i--)
  10022. + writeb_relaxed(cts[i], base + HDMI_AUD_CTS1 + i);
  10023. + for (i = 2; i >= 1; i--)
  10024. + writeb_relaxed(n[i], base + HDMI_AUD_N1 + i);
  10025. + }
  10026. +
  10027. + substream->runtime->delay = substream->runtime->period_size;
  10028. + break;
  10029. +
  10030. + case SNDRV_PCM_TRIGGER_STOP:
  10031. + dw_hdmi_stop_dma(dw);
  10032. + break;
  10033. +
  10034. + default:
  10035. + ret = -EINVAL;
  10036. + break;
  10037. + }
  10038. +
  10039. + return ret;
  10040. +}
  10041. +
  10042. +static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
  10043. +{
  10044. + struct snd_pcm_runtime *runtime = substream->runtime;
  10045. + struct snd_dw_hdmi *dw = substream->private_data;
  10046. +
  10047. + return bytes_to_frames(runtime, dw->buf_offset);
  10048. +}
  10049. +
  10050. +static struct snd_pcm_ops snd_dw_hdmi_ops = {
  10051. + .open = dw_hdmi_open,
  10052. + .close = dw_hdmi_close,
  10053. + .ioctl = snd_pcm_lib_ioctl,
  10054. + .hw_params = dw_hdmi_hw_params,
  10055. + .hw_free = dw_hdmi_hw_free,
  10056. + .prepare = dw_hdmi_prepare,
  10057. + .trigger = dw_hdmi_trigger,
  10058. + .pointer = dw_hdmi_pointer,
  10059. + .page = snd_pcm_lib_get_vmalloc_page,
  10060. +};
  10061. +
  10062. +static int snd_dw_hdmi_probe(struct platform_device *pdev)
  10063. +{
  10064. + const struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
  10065. + struct device *dev = pdev->dev.parent;
  10066. + struct snd_dw_hdmi *dw;
  10067. + struct snd_card *card;
  10068. + struct snd_pcm *pcm;
  10069. + unsigned revision;
  10070. + int ret;
  10071. +
  10072. + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
  10073. + data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
  10074. + revision = readb_relaxed(data->base + HDMI_REVISION_ID);
  10075. + if (revision != 0x0a && revision != 0x1a) {
  10076. + dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n",
  10077. + revision);
  10078. + return -ENXIO;
  10079. + }
  10080. +
  10081. + ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
  10082. + THIS_MODULE, sizeof(struct snd_dw_hdmi), &card);
  10083. + if (ret < 0)
  10084. + return ret;
  10085. +
  10086. + snd_card_set_dev(card, dev);
  10087. +
  10088. + strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
  10089. + strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
  10090. + snprintf(card->longname, sizeof(card->longname),
  10091. + "%s rev 0x%02x, irq %d", card->shortname, revision,
  10092. + data->irq);
  10093. +
  10094. + dw = card->private_data;
  10095. + dw->card = card;
  10096. + dw->data = *data;
  10097. + dw->revision = revision;
  10098. +
  10099. + ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm);
  10100. + if (ret < 0)
  10101. + goto err;
  10102. +
  10103. + dw->pcm = pcm;
  10104. + pcm->private_data = dw;
  10105. + strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
  10106. + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);
  10107. +
  10108. + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
  10109. + dev, 64 * 1024, 64 * 1024);
  10110. +
  10111. + ret = snd_card_register(card);
  10112. + if (ret < 0)
  10113. + goto err;
  10114. +
  10115. + platform_set_drvdata(pdev, dw);
  10116. +
  10117. + return 0;
  10118. +
  10119. +err:
  10120. + snd_card_free(card);
  10121. + return ret;
  10122. +}
  10123. +
  10124. +static int snd_dw_hdmi_remove(struct platform_device *pdev)
  10125. +{
  10126. + struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
  10127. +
  10128. + snd_card_free(dw->card);
  10129. +
  10130. + return 0;
  10131. +}
  10132. +
  10133. +#ifdef CONFIG_PM_SLEEP
  10134. +static int snd_dw_hdmi_suspend(struct device *dev)
  10135. +{
  10136. + struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
  10137. +
  10138. + snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
  10139. + snd_pcm_suspend_all(dw->pcm);
  10140. +
  10141. + return 0;
  10142. +}
  10143. +
  10144. +static int snd_dw_hdmi_resume(struct device *dev)
  10145. +{
  10146. + struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
  10147. +
  10148. + snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0);
  10149. +
  10150. + return 0;
  10151. +}
  10152. +
  10153. +static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
  10154. + snd_dw_hdmi_resume);
  10155. +#define PM_OPS &snd_dw_hdmi_pm
  10156. +#else
  10157. +#define PM_OPS NULL
  10158. +#endif
  10159. +
  10160. +static struct platform_driver snd_dw_hdmi_driver = {
  10161. + .probe = snd_dw_hdmi_probe,
  10162. + .remove = snd_dw_hdmi_remove,
  10163. + .driver = {
  10164. + .name = "dw-hdmi-audio",
  10165. + .owner = THIS_MODULE,
  10166. + .pm = PM_OPS,
  10167. + },
  10168. +};
  10169. +
  10170. +module_platform_driver(snd_dw_hdmi_driver);
  10171. +
  10172. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  10173. +MODULE_LICENSE("GPL");
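
The comment in dw_hdmi_reformat_iec958() above describes a bit repacking: bit 3 of the preamble nibble (the "block start" flag) becomes bit 28, and the rest of the IEC958 subframe shifts right by four bits, leaving PCUV in 27..24 and the 24-bit sample in 23..0. The standalone demonstration below reproduces that shuffle; it is an editorial illustration, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Same transform as the inner loop of dw_hdmi_reformat_iec958(). */
static uint32_t repack_iec958(uint32_t sample)
{
	uint32_t b = (sample & 8) << (28 - 3);	/* "block start" -> bit 28 */

	return (sample >> 4) | b;
}

int main(void)
{
	/* PCUV = 0x5, 24-bit audio = 0x123456, block-start preamble = 0x8 */
	uint32_t in = (0x5u << 28) | (0x123456u << 4) | 0x8u;

	printf("in  = %08x\n", in);			/* 51234568 */
	printf("out = %08x\n", repack_iec958(in));	/* 15123456 */
	return 0;
}
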
  10174. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-audio.h linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-audio.h
  10175. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-audio.h 1970-01-01 01:00:00.000000000 +0100
  10176. +++ linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-audio.h 2014-04-25 14:11:13.591375395 +0200
  10177. @@ -0,0 +1,15 @@
  10178. +#ifndef DW_HDMI_AUDIO_H
  10179. +#define DW_HDMI_AUDIO_H
  10180. +
  10181. +struct imx_hdmi;
  10182. +
  10183. +struct dw_hdmi_audio_data {
  10184. + phys_addr_t phys;
  10185. + void __iomem *base;
  10186. + int irq;
  10187. + struct imx_hdmi *hdmi;
  10188. + u8 *eld;
  10189. + void (*set_sample_rate)(struct imx_hdmi *, unsigned);
  10190. +};
  10191. +
  10192. +#endif
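
snd_dw_hdmi_probe() above expects a struct dw_hdmi_audio_data in pdev->dev.platform_data for a child device named "dw-hdmi-audio". The sketch below shows one way a parent bridge driver could register such a child; it is a hypothetical illustration, not part of this patch, and every field value (registers, IRQ, ELD buffer, sample-rate callback) is an assumption about the parent driver's internals.

#include <linux/platform_device.h>
#include "dw-hdmi-audio.h"

static struct platform_device *
register_hdmi_audio(struct device *parent, struct imx_hdmi *hdmi,
		    void __iomem *regs, phys_addr_t phys, int irq, u8 *eld,
		    void (*set_rate)(struct imx_hdmi *, unsigned))
{
	struct dw_hdmi_audio_data data = {
		.phys		= phys,
		.base		= regs,
		.irq		= irq,
		.hdmi		= hdmi,
		.eld		= eld,
		.set_sample_rate = set_rate,
	};

	/* platform_device_register_data() copies the structure, and the
	 * audio driver copies it again in its probe routine. */
	return platform_device_register_data(parent, "dw-hdmi-audio",
					     PLATFORM_DEVID_AUTO,
					     &data, sizeof(data));
}
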
  10193. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-cec.c linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-cec.c
  10194. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-cec.c 1970-01-01 01:00:00.000000000 +0100
  10195. +++ linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-cec.c 2014-04-25 14:11:13.591375395 +0200
  10196. @@ -0,0 +1,205 @@
  10197. +/* http://git.freescale.com/git/cgit.cgi/imx/linux-2.6-imx.git/tree/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c?h=imx_3.0.35_4.1.0 */
  10198. +#include <linux/cec-dev.h>
  10199. +#include <linux/interrupt.h>
  10200. +#include <linux/io.h>
  10201. +#include <linux/module.h>
  10202. +#include <linux/platform_device.h>
  10203. +#include <linux/sched.h>
  10204. +#include <linux/slab.h>
  10205. +
  10206. +#include "imx-hdmi.h"
  10207. +#include "dw-hdmi-cec.h"
  10208. +
  10209. +#define DEV_NAME "mxc_hdmi_cec"
  10210. +
  10211. +enum {
  10212. + CEC_STAT_DONE = BIT(0),
  10213. + CEC_STAT_EOM = BIT(1),
  10214. + CEC_STAT_NACK = BIT(2),
  10215. + CEC_STAT_ARBLOST = BIT(3),
  10216. + CEC_STAT_ERROR_INIT = BIT(4),
  10217. + CEC_STAT_ERROR_FOLL = BIT(5),
  10218. + CEC_STAT_WAKEUP = BIT(6),
  10219. +
  10220. + CEC_CTRL_START = BIT(0),
  10221. + CEC_CTRL_NORMAL = BIT(1),
  10222. +};
  10223. +
  10224. +struct dw_hdmi_cec {
  10225. + struct cec_dev cec;
  10226. +
  10227. + struct device *dev;
  10228. + void __iomem *base;
  10229. + const struct dw_hdmi_cec_ops *ops;
  10230. + void *ops_data;
  10231. + int irq;
  10232. +};
  10233. +
  10234. +static void dw_hdmi_set_address(struct cec_dev *cec_dev, unsigned addresses)
  10235. +{
  10236. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10237. +
  10238. + writeb(addresses & 255, cec->base + HDMI_CEC_ADDR_L);
  10239. + writeb(addresses >> 8, cec->base + HDMI_CEC_ADDR_H);
  10240. +}
  10241. +
  10242. +static void dw_hdmi_send_message(struct cec_dev *cec_dev, u8 *msg,
  10243. + size_t count)
  10244. +{
  10245. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10246. + unsigned i;
  10247. +
  10248. + for (i = 0; i < count; i++)
  10249. + writeb(msg[i], cec->base + HDMI_CEC_TX_DATA0 + i);
  10250. +
  10251. + writeb(count, cec->base + HDMI_CEC_TX_CNT);
  10252. + writeb(CEC_CTRL_NORMAL | CEC_CTRL_START, cec->base + HDMI_CEC_CTRL);
  10253. +}
  10254. +
  10255. +static irqreturn_t dw_hdmi_cec_irq(int irq, void *data)
  10256. +{
  10257. + struct dw_hdmi_cec *cec = data;
  10258. + struct cec_dev *cec_dev = &cec->cec;
  10259. + unsigned stat = readb(cec->base + HDMI_IH_CEC_STAT0);
  10260. +
  10261. + if (stat == 0)
  10262. + return IRQ_NONE;
  10263. +
  10264. + writeb(stat, cec->base + HDMI_IH_CEC_STAT0);
  10265. +
  10266. + if (stat & CEC_STAT_ERROR_INIT) {
  10267. + if (cec->cec.retries) {
  10268. + unsigned v = readb(cec->base + HDMI_CEC_CTRL);
  10269. + writeb(v | CEC_CTRL_START, cec->base + HDMI_CEC_CTRL);
  10270. + cec->cec.retries -= 1;
  10271. + } else {
  10272. + cec->cec.write_busy = 0;
  10273. + cec_dev_event(cec_dev, MESSAGE_TYPE_SEND_ERROR, NULL, 0);
  10274. + }
  10275. + } else if (stat & (CEC_STAT_DONE | CEC_STAT_NACK))
  10276. + cec_dev_send_complete(cec_dev, stat & CEC_STAT_DONE);
  10277. +
  10278. + if (stat & CEC_STAT_EOM) {
  10279. + unsigned len, i;
  10280. + u8 msg[MAX_MESSAGE_LEN];
  10281. +
  10282. + len = readb(cec->base + HDMI_CEC_RX_CNT);
  10283. + if (len > sizeof(msg))
  10284. + len = sizeof(msg);
  10285. +
  10286. + for (i = 0; i < len; i++)
  10287. + msg[i] = readb(cec->base + HDMI_CEC_RX_DATA0 + i);
  10288. +
  10289. + writeb(0, cec->base + HDMI_CEC_LOCK);
  10290. +
  10291. + cec_dev_receive(cec_dev, msg, len);
  10292. + }
  10293. +
  10294. + return IRQ_HANDLED;
  10295. +}
  10297. +
  10298. +static void dw_hdmi_cec_release(struct cec_dev *cec_dev)
  10299. +{
  10300. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10301. +
  10302. + writeb(~0, cec->base + HDMI_CEC_MASK);
  10303. + writeb(~0, cec->base + HDMI_IH_MUTE_CEC_STAT0);
  10304. + writeb(0, cec->base + HDMI_CEC_POLARITY);
  10305. +
  10306. + free_irq(cec->irq, cec);
  10307. +
  10308. + cec->ops->disable(cec->ops_data);
  10309. +}
  10310. +
  10311. +static int dw_hdmi_cec_open(struct cec_dev *cec_dev)
  10312. +{
  10313. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10314. + unsigned irqs;
  10315. + int ret;
  10316. +
  10317. + writeb(0, cec->base + HDMI_CEC_CTRL);
  10318. + writeb(~0, cec->base + HDMI_IH_CEC_STAT0);
  10319. + writeb(0, cec->base + HDMI_CEC_LOCK);
  10320. +
  10321. + ret = request_irq(cec->irq, dw_hdmi_cec_irq, IRQF_SHARED,
  10322. + DEV_NAME, cec);
  10323. + if (ret < 0)
  10324. + return ret;
  10325. +
  10326. + dw_hdmi_set_address(cec_dev, cec_dev->addresses);
  10327. +
  10328. + cec->ops->enable(cec->ops_data);
  10329. +
  10330. + irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
  10331. + CEC_STAT_DONE;
  10332. + writeb(irqs, cec->base + HDMI_CEC_POLARITY);
  10333. + writeb(~irqs, cec->base + HDMI_CEC_MASK);
  10334. + writeb(~irqs, cec->base + HDMI_IH_MUTE_CEC_STAT0);
  10335. +
  10336. + return 0;
  10337. +}
  10338. +
  10339. +static int dw_hdmi_cec_probe(struct platform_device *pdev)
  10340. +{
  10341. + struct dw_hdmi_cec_data *data = dev_get_platdata(&pdev->dev);
  10342. + struct dw_hdmi_cec *cec;
  10343. +
  10344. + if (!data)
  10345. + return -ENXIO;
  10346. +
  10347. + cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
  10348. + if (!cec)
  10349. + return -ENOMEM;
  10350. +
  10351. + cec->dev = &pdev->dev;
  10352. + cec->base = data->base;
  10353. + cec->irq = data->irq;
  10354. + cec->ops = data->ops;
  10355. + cec->ops_data = data->ops_data;
  10356. + cec->cec.open = dw_hdmi_cec_open;
  10357. + cec->cec.release = dw_hdmi_cec_release;
  10358. + cec->cec.send_message = dw_hdmi_send_message;
  10359. + cec->cec.set_address = dw_hdmi_set_address;
  10360. +
  10361. + cec_dev_init(&cec->cec, THIS_MODULE);
  10362. +
  10363. + /* FIXME: soft-reset the CEC interface */
  10364. +
  10365. + dw_hdmi_set_address(&cec->cec, cec->cec.addresses);
  10366. + writeb(0, cec->base + HDMI_CEC_TX_CNT);
  10367. + writeb(~0, cec->base + HDMI_CEC_MASK);
  10368. + writeb(~0, cec->base + HDMI_IH_MUTE_CEC_STAT0);
  10369. + writeb(0, cec->base + HDMI_CEC_POLARITY);
  + platform_set_drvdata(pdev, cec);
  10370. +
  10371. + /*
  10372. + * Our device is just a convenience - we want to link to the real
  10373. + * hardware device here, so that userspace can see the association
  10374. + * between the HDMI hardware and its associated CEC chardev.
  10375. + */
  10376. + return cec_dev_add(&cec->cec, cec->dev->parent, DEV_NAME);
  10377. +}
  10378. +
  10379. +static int dw_hdmi_cec_remove(struct platform_device *pdev)
  10380. +{
  10381. + struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
  10382. +
  10383. + cec_dev_remove(&cec->cec);
  10384. +
  10385. + return 0;
  10386. +}
  10387. +
  10388. +static struct platform_driver dw_hdmi_cec_driver = {
  10389. + .probe = dw_hdmi_cec_probe,
  10390. + .remove = dw_hdmi_cec_remove,
  10391. + .driver = {
  10392. + .name = "dw-hdmi-cec",
  10393. + .owner = THIS_MODULE,
  10394. + },
  10395. +};
  10396. +module_platform_driver(dw_hdmi_cec_driver);
  10397. +
  10398. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  10399. +MODULE_DESCRIPTION("Synopsis Designware HDMI CEC driver for i.MX");
  10400. +MODULE_LICENSE("GPL");
  10401. +MODULE_ALIAS(PLATFORM_MODULE_PREFIX "dw-hdmi-cec");
  10402. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-cec.h linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-cec.h
  10403. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/dw-hdmi-cec.h 1970-01-01 01:00:00.000000000 +0100
  10404. +++ linux-3.15-rc1/drivers/staging/imx-drm/dw-hdmi-cec.h 2014-04-25 14:11:13.591375395 +0200
  10405. @@ -0,0 +1,16 @@
  10406. +#ifndef DW_HDMI_CEC_H
  10407. +#define DW_HDMI_CEC_H
  10408. +
  10409. +struct dw_hdmi_cec_ops {
  10410. + void (*enable)(void *);
  10411. + void (*disable)(void *);
  10412. +};
  10413. +
  10414. +struct dw_hdmi_cec_data {
  10415. + void __iomem *base;
  10416. + int irq;
  10417. + const struct dw_hdmi_cec_ops *ops;
  10418. + void *ops_data;
  10419. +};
  10420. +
  10421. +#endif
  10422. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-drm-core.c linux-3.15-rc1/drivers/staging/imx-drm/imx-drm-core.c
  10423. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-drm-core.c 2014-04-13 23:18:35.000000000 +0200
  10424. +++ linux-3.15-rc1/drivers/staging/imx-drm/imx-drm-core.c 2014-04-25 14:11:13.591375395 +0200
  10425. @@ -517,7 +517,7 @@
  10426. of_node_put(port);
  10427. if (port == imx_crtc->port) {
  10428. ret = of_graph_parse_endpoint(ep, &endpoint);
  10429. - return ret ? ret : endpoint.id;
  10430. + return ret ? ret : endpoint.port;
  10431. }
  10432. } while (ep);
  10433. @@ -675,6 +675,11 @@
  10434. if (!remote || !of_device_is_available(remote)) {
  10435. of_node_put(remote);
  10436. continue;
  10437. + } else if (!of_device_is_available(remote->parent)) {
  10438. + dev_warn(&pdev->dev, "parent device of %s is not available\n",
  10439. + remote->full_name);
  10440. + of_node_put(remote);
  10441. + continue;
  10442. }
  10443. ret = imx_drm_add_component(&pdev->dev, remote);
  10444. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-hdmi.c linux-3.15-rc1/drivers/staging/imx-drm/imx-hdmi.c
  10445. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-hdmi.c 2014-04-13 23:18:35.000000000 +0200
  10446. +++ linux-3.15-rc1/drivers/staging/imx-drm/imx-hdmi.c 2014-04-25 14:11:13.591375395 +0200
  10447. @@ -28,6 +28,9 @@
  10448. #include <drm/drm_edid.h>
  10449. #include <drm/drm_encoder_slave.h>
  10450. +#include "drm-ddc-connector.h"
  10451. +#include "dw-hdmi-audio.h"
  10452. +#include "dw-hdmi-cec.h"
  10453. #include "ipu-v3/imx-ipu-v3.h"
  10454. #include "imx-hdmi.h"
  10455. #include "imx-drm.h"
  10456. @@ -112,27 +115,27 @@
  10457. };
  10458. struct imx_hdmi {
  10459. - struct drm_connector connector;
  10460. + struct drm_ddc_connector *ddc_conn;
  10461. struct drm_encoder encoder;
  10462. + struct platform_device *audio;
  10463. + struct platform_device *cec;
  10464. enum imx_hdmi_devtype dev_type;
  10465. struct device *dev;
  10466. struct clk *isfr_clk;
  10467. struct clk *iahb_clk;
  10468. - enum drm_connector_status connector_status;
  10469. -
  10470. struct hdmi_data_info hdmi_data;
  10471. int vic;
  10472. u8 edid[HDMI_EDID_LEN];
  10473. + u8 mc_clkdis;
  10474. bool cable_plugin;
  10475. bool phy_enabled;
  10476. struct drm_display_mode previous_mode;
  10477. struct regmap *regmap;
  10478. - struct i2c_adapter *ddc;
  10479. void __iomem *regs;
  10480. unsigned int sample_rate;
  10481. @@ -362,6 +365,12 @@
  10482. hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
  10483. }
  10484. +static void imx_hdmi_set_sample_rate(struct imx_hdmi *hdmi, unsigned rate)
  10485. +{
  10486. + hdmi->sample_rate = rate;
  10487. + hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
  10488. +}
  10489. +
  10490. /*
  10491. * this submodule is responsible for the video data synchronization.
  10492. * for example, for RGB 4:4:4 input, the data map is defined as
  10493. @@ -1148,8 +1157,6 @@
  10494. /* HDMI Initialization Step B.4 */
  10495. static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
  10496. {
  10497. - u8 clkdis;
  10498. -
  10499. /* control period minimum duration */
  10500. hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR);
  10501. hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR);
  10502. @@ -1161,23 +1168,28 @@
  10503. hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM);
  10504. /* Enable pixel clock and tmds data path */
  10505. - clkdis = 0x7F;
  10506. - clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
  10507. - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
  10508. + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE |
  10509. + HDMI_MC_CLKDIS_CSCCLK_DISABLE |
  10510. + HDMI_MC_CLKDIS_AUDCLK_DISABLE |
  10511. + HDMI_MC_CLKDIS_PREPCLK_DISABLE |
  10512. + HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
  10513. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
  10514. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10515. - clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
  10516. - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
  10517. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
  10518. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10519. /* Enable csc path */
  10520. if (is_color_space_conversion(hdmi)) {
  10521. - clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
  10522. - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
  10523. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
  10524. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10525. }
  10526. }
  10527. static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
  10528. {
  10529. - hdmi_modb(hdmi, 0, HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
  10530. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
  10531. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10532. }
  10533. /* Workaround to clear the overflow condition */
  10534. @@ -1380,41 +1392,16 @@
  10535. static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
  10536. *connector, bool force)
  10537. {
  10538. - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
  10539. - connector);
  10540. - return hdmi->connector_status;
  10541. -}
  10542. -
  10543. -static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
  10544. -{
  10545. - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
  10546. - connector);
  10547. - struct edid *edid;
  10548. - int ret;
  10549. + struct imx_hdmi *hdmi = drm_ddc_private(connector);
  10550. - if (!hdmi->ddc)
  10551. - return 0;
  10552. -
  10553. - edid = drm_get_edid(connector, hdmi->ddc);
  10554. - if (edid) {
  10555. - dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
  10556. - edid->width_cm, edid->height_cm);
  10557. -
  10558. - drm_mode_connector_update_edid_property(connector, edid);
  10559. - ret = drm_add_edid_modes(connector, edid);
  10560. - kfree(edid);
  10561. - } else {
  10562. - dev_dbg(hdmi->dev, "failed to get edid\n");
  10563. - }
  10564. -
  10565. - return 0;
  10566. + return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
  10567. + connector_status_connected : connector_status_disconnected;
  10568. }
  10569. static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
  10570. *connector)
  10571. {
  10572. - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
  10573. - connector);
  10574. + struct imx_hdmi *hdmi = drm_ddc_private(connector);
  10575. return &hdmi->encoder;
  10576. }
  10577. @@ -1483,15 +1470,8 @@
  10578. .disable = imx_hdmi_encoder_disable,
  10579. };
  10580. -static struct drm_connector_funcs imx_hdmi_connector_funcs = {
  10581. - .dpms = drm_helper_connector_dpms,
  10582. - .fill_modes = drm_helper_probe_single_connector_modes,
  10583. - .detect = imx_hdmi_connector_detect,
  10584. - .destroy = imx_drm_connector_destroy,
  10585. -};
  10586. -
  10587. static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
  10588. - .get_modes = imx_hdmi_connector_get_modes,
  10589. + .get_modes = drm_ddc_connector_get_modes,
  10590. .mode_valid = imx_drm_connector_mode_valid,
  10591. .best_encoder = imx_hdmi_connector_best_encoder,
  10592. };
  10593. @@ -1524,7 +1504,6 @@
  10594. hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
  10595. - hdmi->connector_status = connector_status_connected;
  10596. imx_hdmi_poweron(hdmi);
  10597. } else {
  10598. dev_dbg(hdmi->dev, "EVENT=plugout\n");
  10599. @@ -1532,10 +1511,9 @@
  10600. hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
  10601. HDMI_PHY_POL0);
  10602. - hdmi->connector_status = connector_status_disconnected;
  10603. imx_hdmi_poweroff(hdmi);
  10604. }
  10605. - drm_helper_hpd_irq_event(hdmi->connector.dev);
  10606. + drm_helper_hpd_irq_event(hdmi->ddc_conn->connector.dev);
  10607. }
  10608. hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
  10609. @@ -1553,24 +1531,42 @@
  10610. if (ret)
  10611. return ret;
  10612. - hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
  10613. + hdmi->ddc_conn->connector.polled = DRM_CONNECTOR_POLL_HPD;
  10614. drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
  10615. drm_encoder_init(drm, &hdmi->encoder, &imx_hdmi_encoder_funcs,
  10616. DRM_MODE_ENCODER_TMDS);
  10617. - drm_connector_helper_add(&hdmi->connector,
  10618. + drm_connector_helper_add(&hdmi->ddc_conn->connector,
  10619. &imx_hdmi_connector_helper_funcs);
  10620. - drm_connector_init(drm, &hdmi->connector, &imx_hdmi_connector_funcs,
  10621. - DRM_MODE_CONNECTOR_HDMIA);
  10622. -
  10623. - hdmi->connector.encoder = &hdmi->encoder;
  10624. + drm_ddc_connector_add(drm, hdmi->ddc_conn, DRM_MODE_CONNECTOR_HDMIA);
  10625. - drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
  10626. + drm_mode_connector_attach_encoder(&hdmi->ddc_conn->connector, &hdmi->encoder);
  10627. return 0;
  10628. }
  10629. +static void imx_hdmi_cec_enable(void *data)
  10630. +{
  10631. + struct imx_hdmi *hdmi = data;
  10632. +
  10633. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
  10634. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10635. +}
  10636. +
  10637. +static void imx_hdmi_cec_disable(void *data)
  10638. +{
  10639. + struct imx_hdmi *hdmi = data;
  10640. +
  10641. + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
  10642. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10643. +}
  10644. +
  10645. +static const struct dw_hdmi_cec_ops imx_hdmi_cec_ops = {
  10646. + .enable = imx_hdmi_cec_enable,
  10647. + .disable = imx_hdmi_cec_disable,
  10648. +};
  10649. +
  10650. static struct platform_device_id imx_hdmi_devtype[] = {
  10651. {
  10652. .name = "imx6q-hdmi",
  10653. @@ -1592,11 +1588,13 @@
  10654. static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
  10655. {
  10656. struct platform_device *pdev = to_platform_device(dev);
  10657. + struct platform_device_info pdevinfo;
  10658. const struct of_device_id *of_id =
  10659. of_match_device(imx_hdmi_dt_ids, dev);
  10660. struct drm_device *drm = data;
  10661. struct device_node *np = dev->of_node;
  10662. - struct device_node *ddc_node;
  10663. + struct dw_hdmi_audio_data audio;
  10664. + struct dw_hdmi_cec_data cec;
  10665. struct imx_hdmi *hdmi;
  10666. struct resource *iores;
  10667. int ret, irq;
  10668. @@ -1605,27 +1603,22 @@
  10669. if (!hdmi)
  10670. return -ENOMEM;
  10671. + hdmi->ddc_conn = drm_ddc_connector_create(drm, np, hdmi);
  10672. + if (IS_ERR(hdmi->ddc_conn))
  10673. + return PTR_ERR(hdmi->ddc_conn);
  10674. +
  10675. + hdmi->ddc_conn->detect = imx_hdmi_connector_detect;
  10676. +
  10677. hdmi->dev = dev;
  10678. - hdmi->connector_status = connector_status_disconnected;
  10679. hdmi->sample_rate = 48000;
  10680. hdmi->ratio = 100;
  10681. + hdmi->mc_clkdis = 0x7f;
  10682. if (of_id) {
  10683. const struct platform_device_id *device_id = of_id->data;
  10684. hdmi->dev_type = device_id->driver_data;
  10685. }
  10686. - ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
  10687. - if (ddc_node) {
  10688. - hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
  10689. - if (!hdmi->ddc)
  10690. - dev_dbg(hdmi->dev, "failed to read ddc node\n");
  10691. -
  10692. - of_node_put(ddc_node);
  10693. - } else {
  10694. - dev_dbg(hdmi->dev, "no ddc property found\n");
  10695. - }
  10696. -
  10697. irq = platform_get_irq(pdev, 0);
  10698. if (irq < 0)
  10699. return -EINVAL;
  10700. @@ -1711,6 +1704,35 @@
  10701. /* Unmute interrupts */
  10702. hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
  10703. + memset(&pdevinfo, 0, sizeof(pdevinfo));
  10704. + pdevinfo.parent = dev;
  10705. + pdevinfo.id = PLATFORM_DEVID_AUTO;
  10706. +
  10707. + audio.phys = iores->start;
  10708. + audio.base = hdmi->regs;
  10709. + audio.irq = irq;
  10710. + audio.hdmi = hdmi;
  10711. + audio.eld = hdmi->ddc_conn->connector.eld;
  10712. + audio.set_sample_rate = imx_hdmi_set_sample_rate;
  10713. +
  10714. + pdevinfo.name = "dw-hdmi-audio";
  10715. + pdevinfo.data = &audio;
  10716. + pdevinfo.size_data = sizeof(audio);
  10717. + pdevinfo.dma_mask = DMA_BIT_MASK(32);
  10718. + hdmi->audio = platform_device_register_full(&pdevinfo);
  10719. +
  10720. + cec.base = hdmi->regs;
  10721. + cec.irq = irq;
  10722. + cec.ops = &imx_hdmi_cec_ops;
  10723. + cec.ops_data = hdmi;
  10724. +
  10725. + pdevinfo.name = "dw-hdmi-cec";
  10726. + pdevinfo.data = &cec;
  10727. + pdevinfo.size_data = sizeof(cec);
  10728. + pdevinfo.dma_mask = 0;
  10729. +
  10730. + hdmi->cec = platform_device_register_full(&pdevinfo);
  10731. +
  10732. dev_set_drvdata(dev, hdmi);
  10733. return 0;
  10734. @@ -1728,15 +1750,19 @@
  10735. {
  10736. struct imx_hdmi *hdmi = dev_get_drvdata(dev);
  10737. + if (!IS_ERR(hdmi->audio))
  10738. + platform_device_unregister(hdmi->audio);
  10739. + if (!IS_ERR(hdmi->cec))
  10740. + platform_device_unregister(hdmi->cec);
  10741. +
  10742. /* Disable all interrupts */
  10743. hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
  10744. - hdmi->connector.funcs->destroy(&hdmi->connector);
  10745. + hdmi->ddc_conn->connector.funcs->destroy(&hdmi->ddc_conn->connector);
  10746. hdmi->encoder.funcs->destroy(&hdmi->encoder);
  10747. clk_disable_unprepare(hdmi->iahb_clk);
  10748. clk_disable_unprepare(hdmi->isfr_clk);
  10749. - i2c_put_adapter(hdmi->ddc);
  10750. }
  10751. static const struct component_ops hdmi_ops = {
  10752. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-ldb.c linux-3.15-rc1/drivers/staging/imx-drm/imx-ldb.c
  10753. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-ldb.c 2014-04-13 23:18:35.000000000 +0200
  10754. +++ linux-3.15-rc1/drivers/staging/imx-drm/imx-ldb.c 2014-04-25 14:11:13.595375411 +0200
  10755. @@ -24,6 +24,7 @@
  10756. #include <drm/drmP.h>
  10757. #include <drm/drm_fb_helper.h>
  10758. #include <drm/drm_crtc_helper.h>
  10759. +#include <drm/drm_panel.h>
  10760. #include <linux/mfd/syscon.h>
  10761. #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  10762. #include <linux/of_address.h>
  10763. @@ -60,6 +61,7 @@
  10764. struct imx_ldb *ldb;
  10765. struct drm_connector connector;
  10766. struct drm_encoder encoder;
  10767. + struct drm_panel *panel;
  10768. struct device_node *child;
  10769. int chno;
  10770. void *edid;
  10771. @@ -96,6 +98,13 @@
  10772. struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
  10773. int num_modes = 0;
  10774. + if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
  10775. + imx_ldb_ch->panel->funcs->get_modes) {
  10776. + num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
  10777. + if (num_modes > 0)
  10778. + return num_modes;
  10779. + }
  10780. +
  10781. if (imx_ldb_ch->edid) {
  10782. drm_mode_connector_update_edid_property(connector,
  10783. imx_ldb_ch->edid);
  10784. @@ -243,6 +252,8 @@
  10785. }
  10786. regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
  10787. +
  10788. + drm_panel_enable(imx_ldb_ch->panel);
  10789. }
  10790. static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
  10791. @@ -294,6 +305,8 @@
  10792. (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
  10793. return;
  10794. + drm_panel_disable(imx_ldb_ch->panel);
  10795. +
  10796. if (imx_ldb_ch == &ldb->channel[0])
  10797. ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
  10798. else if (imx_ldb_ch == &ldb->channel[1])
  10799. @@ -379,6 +392,9 @@
  10800. drm_connector_init(drm, &imx_ldb_ch->connector,
  10801. &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
  10802. + if (imx_ldb_ch->panel)
  10803. + drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
  10804. +
  10805. drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
  10806. &imx_ldb_ch->encoder);
  10807. @@ -493,6 +509,7 @@
  10808. for_each_child_of_node(np, child) {
  10809. struct imx_ldb_channel *channel;
  10810. + struct device_node *panel_node;
  10811. ret = of_property_read_u32(child, "reg", &i);
  10812. if (ret || i < 0 || i > 1)
  10813. @@ -556,6 +573,10 @@
  10814. return -EINVAL;
  10815. }
  10816. + panel_node = of_parse_phandle(child, "fsl,panel", 0);
  10817. + if (panel_node)
  10818. + channel->panel = of_drm_find_panel(panel_node);
  10819. +
  10820. ret = imx_ldb_register(drm, channel);
  10821. if (ret)
  10822. return ret;
  10823. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-tve.c linux-3.15-rc1/drivers/staging/imx-drm/imx-tve.c
  10824. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/imx-tve.c 2014-04-13 23:18:35.000000000 +0200
  10825. +++ linux-3.15-rc1/drivers/staging/imx-drm/imx-tve.c 2014-04-25 14:11:13.595375411 +0200
  10826. @@ -22,7 +22,6 @@
  10827. #include <linux/clk-provider.h>
  10828. #include <linux/component.h>
  10829. #include <linux/module.h>
  10830. -#include <linux/i2c.h>
  10831. #include <linux/regmap.h>
  10832. #include <linux/regulator/consumer.h>
  10833. #include <linux/spinlock.h>
  10834. @@ -31,6 +30,7 @@
  10835. #include <drm/drm_fb_helper.h>
  10836. #include <drm/drm_crtc_helper.h>
  10837. +#include "drm-ddc-connector.h"
  10838. #include "ipu-v3/imx-ipu-v3.h"
  10839. #include "imx-drm.h"
  10840. @@ -111,7 +111,7 @@
  10841. };
  10842. struct imx_tve {
  10843. - struct drm_connector connector;
  10844. + struct drm_ddc_connector *ddc_conn;
  10845. struct drm_encoder encoder;
  10846. struct device *dev;
  10847. spinlock_t lock; /* register lock */
  10848. @@ -120,7 +120,6 @@
  10849. struct regmap *regmap;
  10850. struct regulator *dac_reg;
  10851. - struct i2c_adapter *ddc;
  10852. struct clk *clk;
  10853. struct clk *di_sel_clk;
  10854. struct clk_hw clk_hw_di;
  10855. @@ -219,35 +218,10 @@
  10856. return 0;
  10857. }
  10858. -static enum drm_connector_status imx_tve_connector_detect(
  10859. - struct drm_connector *connector, bool force)
  10860. -{
  10861. - return connector_status_connected;
  10862. -}
  10863. -
  10864. -static int imx_tve_connector_get_modes(struct drm_connector *connector)
  10865. -{
  10866. - struct imx_tve *tve = con_to_tve(connector);
  10867. - struct edid *edid;
  10868. - int ret = 0;
  10869. -
  10870. - if (!tve->ddc)
  10871. - return 0;
  10872. -
  10873. - edid = drm_get_edid(connector, tve->ddc);
  10874. - if (edid) {
  10875. - drm_mode_connector_update_edid_property(connector, edid);
  10876. - ret = drm_add_edid_modes(connector, edid);
  10877. - kfree(edid);
  10878. - }
  10879. -
  10880. - return ret;
  10881. -}
  10882. -
  10883. static int imx_tve_connector_mode_valid(struct drm_connector *connector,
  10884. struct drm_display_mode *mode)
  10885. {
  10886. - struct imx_tve *tve = con_to_tve(connector);
  10887. + struct imx_tve *tve = drm_ddc_private(connector);
  10888. unsigned long rate;
  10889. int ret;
  10890. @@ -274,7 +248,7 @@
  10891. static struct drm_encoder *imx_tve_connector_best_encoder(
  10892. struct drm_connector *connector)
  10893. {
  10894. - struct imx_tve *tve = con_to_tve(connector);
  10895. + struct imx_tve *tve = drm_ddc_private(connector);
  10896. return &tve->encoder;
  10897. }
  10898. @@ -362,15 +336,8 @@
  10899. tve_disable(tve);
  10900. }
  10901. -static struct drm_connector_funcs imx_tve_connector_funcs = {
  10902. - .dpms = drm_helper_connector_dpms,
  10903. - .fill_modes = drm_helper_probe_single_connector_modes,
  10904. - .detect = imx_tve_connector_detect,
  10905. - .destroy = imx_drm_connector_destroy,
  10906. -};
  10907. -
  10908. static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
  10909. - .get_modes = imx_tve_connector_get_modes,
  10910. + .get_modes = drm_ddc_connector_get_modes,
  10911. .best_encoder = imx_tve_connector_best_encoder,
  10912. .mode_valid = imx_tve_connector_mode_valid,
  10913. };
  10914. @@ -513,12 +480,11 @@
  10915. drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
  10916. encoder_type);
  10917. - drm_connector_helper_add(&tve->connector,
  10918. + drm_connector_helper_add(&tve->ddc_conn->connector,
  10919. &imx_tve_connector_helper_funcs);
  10920. - drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
  10921. - DRM_MODE_CONNECTOR_VGA);
  10922. + drm_ddc_connector_add(drm, tve->ddc_conn, DRM_MODE_CONNECTOR_VGA);
  10923. - drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
  10924. + drm_mode_connector_attach_encoder(&tve->ddc_conn->connector, &tve->encoder);
  10925. return 0;
  10926. }
  10927. @@ -567,7 +533,6 @@
  10928. struct platform_device *pdev = to_platform_device(dev);
  10929. struct drm_device *drm = data;
  10930. struct device_node *np = dev->of_node;
  10931. - struct device_node *ddc_node;
  10932. struct imx_tve *tve;
  10933. struct resource *res;
  10934. void __iomem *base;
  10935. @@ -579,15 +544,13 @@
  10936. if (!tve)
  10937. return -ENOMEM;
  10938. + tve->ddc_conn = drm_ddc_connector_create(drm, np, tve);
  10939. + if (IS_ERR(tve->ddc_conn))
  10940. + return PTR_ERR(tve->ddc_conn);
  10941. +
  10942. tve->dev = dev;
  10943. spin_lock_init(&tve->lock);
  10944. - ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
  10945. - if (ddc_node) {
  10946. - tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
  10947. - of_node_put(ddc_node);
  10948. - }
  10949. -
  10950. tve->mode = of_get_tve_mode(np);
  10951. if (tve->mode != TVE_MODE_VGA) {
  10952. dev_err(dev, "only VGA mode supported, currently\n");
  10953. @@ -694,7 +657,7 @@
  10954. {
  10955. struct imx_tve *tve = dev_get_drvdata(dev);
  10956. - tve->connector.funcs->destroy(&tve->connector);
  10957. + tve->ddc_conn->connector.funcs->destroy(&tve->ddc_conn->connector);
  10958. tve->encoder.funcs->destroy(&tve->encoder);
  10959. if (!IS_ERR(tve->dac_reg))
  10960. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
  10961. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h 2014-04-13 23:18:35.000000000 +0200
  10962. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h 2014-04-25 14:11:13.595375411 +0200
  10963. @@ -76,6 +76,7 @@
  10964. IPU_IRQ_EOS = 192,
  10965. };
  10966. +int ipu_map_irq(struct ipu_soc *ipu, int irq);
  10967. int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
  10968. enum ipu_channel_irq irq);
  10969. @@ -114,8 +115,10 @@
  10970. void ipu_dc_put(struct ipu_dc *dc);
  10971. int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
  10972. u32 pixel_fmt, u32 width);
  10973. +void ipu_dc_enable(struct ipu_soc *ipu);
  10974. void ipu_dc_enable_channel(struct ipu_dc *dc);
  10975. void ipu_dc_disable_channel(struct ipu_dc *dc);
  10976. +void ipu_dc_disable(struct ipu_soc *ipu);
  10977. /*
  10978. * IPU Display Interface (di) functions
  10979. @@ -152,8 +155,10 @@
  10980. struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow);
  10981. void ipu_dp_put(struct ipu_dp *);
  10982. +int ipu_dp_enable(struct ipu_soc *ipu);
  10983. int ipu_dp_enable_channel(struct ipu_dp *dp);
  10984. void ipu_dp_disable_channel(struct ipu_dp *dp);
  10985. +void ipu_dp_disable(struct ipu_soc *ipu);
  10986. int ipu_dp_setup_channel(struct ipu_dp *dp,
  10987. enum ipu_color_space in, enum ipu_color_space out);
  10988. int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos);
  10989. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-common.c linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-common.c
  10990. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-common.c 2014-04-13 23:18:35.000000000 +0200
  10991. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-common.c 2014-04-25 14:11:13.595375411 +0200
  10992. @@ -697,6 +697,12 @@
  10993. }
  10994. EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
  10995. +bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
  10996. +{
  10997. + return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
  10998. +}
  10999. +EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
  11000. +
  11001. int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
  11002. {
  11003. struct ipu_soc *ipu = channel->ipu;
  11004. @@ -714,6 +720,22 @@
  11005. }
  11006. EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
  11007. +int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
  11008. +{
  11009. + unsigned long timeout;
  11010. +
  11011. + timeout = jiffies + msecs_to_jiffies(ms);
  11012. + ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
  11013. + while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
  11014. + if (time_after(jiffies, timeout))
  11015. + return -ETIMEDOUT;
  11016. + cpu_relax();
  11017. + }
  11018. +
  11019. + return 0;
  11020. +}
  11021. +EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
  11022. +
  11023. int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
  11024. {
  11025. struct ipu_soc *ipu = channel->ipu;
  11026. @@ -933,15 +955,22 @@
  11027. chained_irq_exit(chip, desc);
  11028. }
  11029. -int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
  11030. - enum ipu_channel_irq irq_type)
  11031. +int ipu_map_irq(struct ipu_soc *ipu, int irq)
  11032. {
  11033. - int irq = irq_linear_revmap(ipu->domain, irq_type + channel->num);
  11034. + int virq;
  11035. - if (!irq)
  11036. - irq = irq_create_mapping(ipu->domain, irq_type + channel->num);
  11037. + virq = irq_linear_revmap(ipu->domain, irq);
  11038. + if (!virq)
  11039. + virq = irq_create_mapping(ipu->domain, irq);
  11040. - return irq;
  11041. + return virq;
  11042. +}
  11043. +EXPORT_SYMBOL_GPL(ipu_map_irq);
  11044. +
  11045. +int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
  11046. + enum ipu_channel_irq irq_type)
  11047. +{
  11048. + return ipu_map_irq(ipu, irq_type + channel->num);
  11049. }
  11050. EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
  11051. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-dc.c linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
  11052. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-dc.c 2014-04-13 23:18:35.000000000 +0200
  11053. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-dc.c 2014-04-25 14:11:13.595375411 +0200
  11054. @@ -18,6 +18,7 @@
  11055. #include <linux/types.h>
  11056. #include <linux/errno.h>
  11057. #include <linux/delay.h>
  11058. +#include <linux/interrupt.h>
  11059. #include <linux/io.h>
  11060. #include "../imx-drm.h"
  11061. @@ -92,6 +93,7 @@
  11062. IPU_DC_MAP_GBR24, /* TVEv2 */
  11063. IPU_DC_MAP_BGR666,
  11064. IPU_DC_MAP_BGR24,
  11065. + IPU_DC_MAP_RGB666,
  11066. };
  11067. struct ipu_dc {
  11068. @@ -110,6 +112,9 @@
  11069. struct device *dev;
  11070. struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
  11071. struct mutex mutex;
  11072. + struct completion comp;
  11073. + int dc_irq;
  11074. + int dp_irq;
  11075. };
  11076. static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
  11077. @@ -155,6 +160,8 @@
  11078. return IPU_DC_MAP_BGR666;
  11079. case V4L2_PIX_FMT_BGR24:
  11080. return IPU_DC_MAP_BGR24;
  11081. + case V4L2_PIX_FMT_RGB666:
  11082. + return IPU_DC_MAP_RGB666;
  11083. default:
  11084. return -EINVAL;
  11085. }
  11086. @@ -220,12 +227,16 @@
  11087. writel(0x0, dc->base + DC_WR_CH_ADDR);
  11088. writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
  11089. - ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
  11090. -
  11091. return 0;
  11092. }
  11093. EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
  11094. +void ipu_dc_enable(struct ipu_soc *ipu)
  11095. +{
  11096. + ipu_module_enable(ipu, IPU_CONF_DC_EN);
  11097. +}
  11098. +EXPORT_SYMBOL_GPL(ipu_dc_enable);
  11099. +
  11100. void ipu_dc_enable_channel(struct ipu_dc *dc)
  11101. {
  11102. int di;
  11103. @@ -239,41 +250,55 @@
  11104. }
  11105. EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
  11106. +static irqreturn_t dc_irq_handler(int irq, void *dev_id)
  11107. +{
  11108. + struct ipu_dc *dc = dev_id;
  11109. + u32 reg;
  11110. +
  11111. + reg = readl(dc->base + DC_WR_CH_CONF);
  11112. + reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
  11113. + writel(reg, dc->base + DC_WR_CH_CONF);
  11114. +
  11115. + /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */
  11116. +
  11117. + complete(&dc->priv->comp);
  11118. + return IRQ_HANDLED;
  11119. +}
  11120. +
  11121. void ipu_dc_disable_channel(struct ipu_dc *dc)
  11122. {
  11123. struct ipu_dc_priv *priv = dc->priv;
  11124. + int irq, ret;
  11125. u32 val;
  11126. - int irq = 0, timeout = 50;
  11127. + /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
  11128. if (dc->chno == 1)
  11129. - irq = IPU_IRQ_DC_FC_1;
  11130. + irq = priv->dc_irq;
  11131. else if (dc->chno == 5)
  11132. - irq = IPU_IRQ_DP_SF_END;
  11133. + irq = priv->dp_irq;
  11134. else
  11135. return;
  11136. - /* should wait for the interrupt here */
  11137. - mdelay(50);
  11138. -
  11139. - if (dc->di == 0)
  11140. - val = 0x00000002;
  11141. - else
  11142. - val = 0x00000020;
  11143. -
  11144. - /* Wait for DC triple buffer to empty */
  11145. - while ((readl(priv->dc_reg + DC_STAT) & val) != val) {
  11146. - usleep_range(2000, 20000);
  11147. - timeout -= 2;
  11148. - if (timeout <= 0)
  11149. - break;
  11150. + init_completion(&priv->comp);
  11151. + enable_irq(irq);
  11152. + ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
  11153. + disable_irq(irq);
  11154. + if (ret <= 0) {
  11155. + dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
  11156. +
  11157. + val = readl(dc->base + DC_WR_CH_CONF);
  11158. + val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
  11159. + writel(val, dc->base + DC_WR_CH_CONF);
  11160. }
  11161. -
  11162. - val = readl(dc->base + DC_WR_CH_CONF);
  11163. - val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
  11164. - writel(val, dc->base + DC_WR_CH_CONF);
  11165. }
  11166. EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
  11167. +void ipu_dc_disable(struct ipu_soc *ipu)
  11168. +{
  11169. + ipu_module_disable(ipu, IPU_CONF_DC_EN);
  11170. +}
  11171. +EXPORT_SYMBOL_GPL(ipu_dc_disable);
  11172. +
  11173. static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
  11174. int byte_num, int offset, int mask)
  11175. {
  11176. @@ -340,7 +365,7 @@
  11177. struct ipu_dc_priv *priv;
  11178. static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
  11179. 0x78, 0, 0x94, 0xb4};
  11180. - int i;
  11181. + int i, ret;
  11182. priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  11183. if (!priv)
  11184. @@ -361,6 +386,23 @@
  11185. priv->channels[i].base = priv->dc_reg + channel_offsets[i];
  11186. }
  11187. + priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1);
  11188. + if (!priv->dc_irq)
  11189. + return -EINVAL;
  11190. + ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
  11191. + &priv->channels[1]);
  11192. + if (ret < 0)
  11193. + return ret;
  11194. + disable_irq(priv->dc_irq);
  11195. + priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END);
  11196. + if (!priv->dp_irq)
  11197. + return -EINVAL;
  11198. + ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL,
  11199. + &priv->channels[5]);
  11200. + if (ret < 0)
  11201. + return ret;
  11202. + disable_irq(priv->dp_irq);
  11203. +
  11204. writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
  11205. DC_WR_CH_CONF_PROG_DI_ID,
  11206. priv->channels[1].base + DC_WR_CH_CONF);
  11207. @@ -404,6 +446,12 @@
  11208. ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
  11209. ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
  11210. + /* rgb666 */
  11211. + ipu_dc_map_clear(priv, IPU_DC_MAP_RGB666);
  11212. + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 0, 5, 0xfc); /* blue */
  11213. + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 1, 11, 0xfc); /* green */
  11214. + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 2, 17, 0xfc); /* red */
  11215. +
  11216. return 0;
  11217. }
  11218. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-di.c linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-di.c
  11219. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-di.c 2014-04-13 23:18:35.000000000 +0200
  11220. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-di.c 2014-04-25 14:11:13.595375411 +0200
  11221. @@ -595,7 +595,7 @@
  11222. }
  11223. }
  11224. - if (!sig->clk_pol)
  11225. + if (sig->clk_pol)
  11226. di_gen |= DI_GEN_POLARITY_DISP_CLK;
  11227. ipu_di_write(di, di_gen, DI_GENERAL);
  11228. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
  11229. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c 2014-04-13 23:18:35.000000000 +0200
  11230. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c 2014-04-25 14:11:13.595375411 +0200
  11231. @@ -28,7 +28,12 @@
  11232. #define DMFC_GENERAL1 0x0014
  11233. #define DMFC_GENERAL2 0x0018
  11234. #define DMFC_IC_CTRL 0x001c
  11235. -#define DMFC_STAT 0x0020
  11236. +#define DMFC_WR_CHAN_ALT 0x0020
  11237. +#define DMFC_WR_CHAN_DEF_ALT 0x0024
  11238. +#define DMFC_DP_CHAN_ALT 0x0028
  11239. +#define DMFC_DP_CHAN_DEF_ALT 0x002c
  11240. +#define DMFC_GENERAL1_ALT 0x0030
  11241. +#define DMFC_STAT 0x0034
  11242. #define DMFC_WR_CHAN_1_28 0
  11243. #define DMFC_WR_CHAN_2_41 8
  11244. @@ -133,6 +138,20 @@
  11245. }
  11246. EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
  11247. +static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
  11248. +{
  11249. + unsigned long timeout = jiffies + msecs_to_jiffies(1000);
  11250. +
  11251. + while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
  11252. + if (time_after(jiffies, timeout)) {
  11253. + dev_warn(priv->dev,
  11254. + "Timeout waiting for DMFC FIFOs to clear\n");
  11255. + break;
  11256. + }
  11257. + cpu_relax();
  11258. + }
  11259. +}
  11260. +
  11261. void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
  11262. {
  11263. struct ipu_dmfc_priv *priv = dmfc->priv;
  11264. @@ -141,8 +160,10 @@
  11265. priv->use_count--;
  11266. - if (!priv->use_count)
  11267. + if (!priv->use_count) {
  11268. + ipu_dmfc_wait_fifos(priv);
  11269. ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
  11270. + }
  11271. if (priv->use_count < 0)
  11272. priv->use_count = 0;
  11273. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-dp.c linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
  11274. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-dp.c 2014-04-13 23:18:35.000000000 +0200
  11275. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-dp.c 2014-04-25 14:11:13.595375411 +0200
  11276. @@ -215,10 +215,9 @@
  11277. }
  11278. EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
  11279. -int ipu_dp_enable_channel(struct ipu_dp *dp)
  11280. +int ipu_dp_enable(struct ipu_soc *ipu)
  11281. {
  11282. - struct ipu_flow *flow = to_flow(dp);
  11283. - struct ipu_dp_priv *priv = flow->priv;
  11284. + struct ipu_dp_priv *priv = ipu->dp_priv;
  11285. mutex_lock(&priv->mutex);
  11286. @@ -227,15 +226,28 @@
  11287. priv->use_count++;
  11288. - if (dp->foreground) {
  11289. - u32 reg;
  11290. + mutex_unlock(&priv->mutex);
  11291. +
  11292. + return 0;
  11293. +}
  11294. +EXPORT_SYMBOL_GPL(ipu_dp_enable);
  11295. +
  11296. +int ipu_dp_enable_channel(struct ipu_dp *dp)
  11297. +{
  11298. + struct ipu_flow *flow = to_flow(dp);
  11299. + struct ipu_dp_priv *priv = flow->priv;
  11300. + u32 reg;
  11301. +
  11302. + if (!dp->foreground)
  11303. + return 0;
  11304. +
  11305. + mutex_lock(&priv->mutex);
  11306. - reg = readl(flow->base + DP_COM_CONF);
  11307. - reg |= DP_COM_CONF_FG_EN;
  11308. - writel(reg, flow->base + DP_COM_CONF);
  11309. + reg = readl(flow->base + DP_COM_CONF);
  11310. + reg |= DP_COM_CONF_FG_EN;
  11311. + writel(reg, flow->base + DP_COM_CONF);
  11312. - ipu_srm_dp_sync_update(priv->ipu);
  11313. - }
  11314. + ipu_srm_dp_sync_update(priv->ipu);
  11315. mutex_unlock(&priv->mutex);
  11316. @@ -247,25 +259,38 @@
  11317. {
  11318. struct ipu_flow *flow = to_flow(dp);
  11319. struct ipu_dp_priv *priv = flow->priv;
  11320. + u32 reg, csc;
  11321. +
  11322. + if (!dp->foreground)
  11323. + return;
  11324. mutex_lock(&priv->mutex);
  11325. - priv->use_count--;
  11326. + reg = readl(flow->base + DP_COM_CONF);
  11327. + csc = reg & DP_COM_CONF_CSC_DEF_MASK;
  11328. + if (csc == DP_COM_CONF_CSC_DEF_FG)
  11329. + reg &= ~DP_COM_CONF_CSC_DEF_MASK;
  11330. +
  11331. + reg &= ~DP_COM_CONF_FG_EN;
  11332. + writel(reg, flow->base + DP_COM_CONF);
  11333. +
  11334. + writel(0, flow->base + DP_FG_POS);
  11335. + ipu_srm_dp_sync_update(priv->ipu);
  11336. +
  11337. + if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
  11338. + ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
  11339. +
  11340. + mutex_unlock(&priv->mutex);
  11341. +}
  11342. +EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
  11343. - if (dp->foreground) {
  11344. - u32 reg, csc;
  11345. +void ipu_dp_disable(struct ipu_soc *ipu)
  11346. +{
  11347. + struct ipu_dp_priv *priv = ipu->dp_priv;
  11348. - reg = readl(flow->base + DP_COM_CONF);
  11349. - csc = reg & DP_COM_CONF_CSC_DEF_MASK;
  11350. - if (csc == DP_COM_CONF_CSC_DEF_FG)
  11351. - reg &= ~DP_COM_CONF_CSC_DEF_MASK;
  11352. -
  11353. - reg &= ~DP_COM_CONF_FG_EN;
  11354. - writel(reg, flow->base + DP_COM_CONF);
  11355. -
  11356. - writel(0, flow->base + DP_FG_POS);
  11357. - ipu_srm_dp_sync_update(priv->ipu);
  11358. - }
  11359. + mutex_lock(&priv->mutex);
  11360. +
  11361. + priv->use_count--;
  11362. if (!priv->use_count)
  11363. ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
  11364. @@ -275,7 +300,7 @@
  11365. mutex_unlock(&priv->mutex);
  11366. }
  11367. -EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
  11368. +EXPORT_SYMBOL_GPL(ipu_dp_disable);
  11369. struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
  11370. {
  11371. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-prv.h linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
  11372. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipu-v3/ipu-prv.h 2014-04-13 23:18:35.000000000 +0200
  11373. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipu-v3/ipu-prv.h 2014-04-25 14:11:13.595375411 +0200
  11374. @@ -185,6 +185,9 @@
  11375. int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
  11376. int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
  11377. +bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
  11378. +int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
  11379. +
  11380. int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
  11381. unsigned long base, u32 module, struct clk *ipu_clk);
  11382. void ipu_di_exit(struct ipu_soc *ipu, int id);
  11383. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipuv3-crtc.c linux-3.15-rc1/drivers/staging/imx-drm/ipuv3-crtc.c
  11384. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipuv3-crtc.c 2014-04-13 23:18:35.000000000 +0200
  11385. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipuv3-crtc.c 2014-04-25 14:11:13.595375411 +0200
  11386. @@ -60,24 +60,32 @@
  11387. static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
  11388. {
  11389. + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
  11390. +
  11391. if (ipu_crtc->enabled)
  11392. return;
  11393. - ipu_di_enable(ipu_crtc->di);
  11394. - ipu_dc_enable_channel(ipu_crtc->dc);
  11395. + ipu_dc_enable(ipu);
  11396. ipu_plane_enable(ipu_crtc->plane[0]);
  11397. + /* Start DC channel and DI after IDMAC */
  11398. + ipu_dc_enable_channel(ipu_crtc->dc);
  11399. + ipu_di_enable(ipu_crtc->di);
  11400. ipu_crtc->enabled = 1;
  11401. }
  11402. static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
  11403. {
  11404. + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
  11405. +
  11406. if (!ipu_crtc->enabled)
  11407. return;
  11408. - ipu_plane_disable(ipu_crtc->plane[0]);
  11409. + /* Stop DC channel and DI before IDMAC */
  11410. ipu_dc_disable_channel(ipu_crtc->dc);
  11411. ipu_di_disable(ipu_crtc->di);
  11412. + ipu_plane_disable(ipu_crtc->plane[0]);
  11413. + ipu_dc_disable(ipu);
  11414. ipu_crtc->enabled = 0;
  11415. }
  11416. @@ -158,7 +166,7 @@
  11417. sig_cfg.Vsync_pol = 1;
  11418. sig_cfg.enable_pol = 1;
  11419. - sig_cfg.clk_pol = 1;
  11420. + sig_cfg.clk_pol = 0;
  11421. sig_cfg.width = mode->hdisplay;
  11422. sig_cfg.height = mode->vdisplay;
  11423. sig_cfg.pixel_fmt = out_pixel_fmt;
  11424. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/ipuv3-plane.c linux-3.15-rc1/drivers/staging/imx-drm/ipuv3-plane.c
  11425. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/ipuv3-plane.c 2014-04-13 23:18:35.000000000 +0200
  11426. +++ linux-3.15-rc1/drivers/staging/imx-drm/ipuv3-plane.c 2014-04-25 14:11:13.599375428 +0200
  11427. @@ -239,6 +239,8 @@
  11428. void ipu_plane_enable(struct ipu_plane *ipu_plane)
  11429. {
  11430. + if (ipu_plane->dp)
  11431. + ipu_dp_enable(ipu_plane->ipu);
  11432. ipu_dmfc_enable_channel(ipu_plane->dmfc);
  11433. ipu_idmac_enable_channel(ipu_plane->ipu_ch);
  11434. if (ipu_plane->dp)
  11435. @@ -257,6 +259,8 @@
  11436. ipu_dp_disable_channel(ipu_plane->dp);
  11437. ipu_idmac_disable_channel(ipu_plane->ipu_ch);
  11438. ipu_dmfc_disable_channel(ipu_plane->dmfc);
  11439. + if (ipu_plane->dp)
  11440. + ipu_dp_disable(ipu_plane->ipu);
  11441. }
  11442. static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
  11443. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/Kconfig linux-3.15-rc1/drivers/staging/imx-drm/Kconfig
  11444. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/Kconfig 2014-04-13 23:18:35.000000000 +0200
  11445. +++ linux-3.15-rc1/drivers/staging/imx-drm/Kconfig 2014-04-25 14:11:13.587375378 +0200
  11446. @@ -35,6 +35,7 @@
  11447. config DRM_IMX_LDB
  11448. tristate "Support for LVDS displays"
  11449. depends on DRM_IMX && MFD_SYSCON
  11450. + select DRM_PANEL
  11451. help
  11452. Choose this to enable the internal LVDS Display Bridge (LDB)
  11453. found on i.MX53 and i.MX6 processors.
  11454. @@ -60,3 +61,20 @@
  11455. depends on DRM_IMX
  11456. help
  11457. Choose this if you want to use HDMI on i.MX6.
  11458. +
  11459. +config DRM_DW_HDMI_AUDIO
  11460. + tristate "Synopsis Designware Audio interface"
  11461. + depends on DRM_IMX_HDMI != n
  11462. + help
  11463. + Support the Audio interface which is part of the Synopsis
  11464. + Designware HDMI block. This is used in conjunction with
  11465. + the i.MX HDMI driver.
  11466. +
  11467. +config DRM_DW_HDMI_CEC
  11468. + tristate "Synopsis Designware CEC interface"
  11469. + depends on DRM_IMX_HDMI != n
  11470. + select HDMI_CEC_CORE
  11471. + help
  11472. + Support the CEC interface which is part of the Synopsis
  11473. + Designware HDMI block. This is used in conjunction with
  11474. + the i.MX HDMI driver.
  11475. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/Makefile linux-3.15-rc1/drivers/staging/imx-drm/Makefile
  11476. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/Makefile 2014-04-13 23:18:35.000000000 +0200
  11477. +++ linux-3.15-rc1/drivers/staging/imx-drm/Makefile 2014-04-25 14:11:13.587375378 +0200
  11478. @@ -3,6 +3,7 @@
  11479. obj-$(CONFIG_DRM_IMX) += imxdrm.o
  11480. +obj-$(CONFIG_DRM_IMX) += drm-ddc-connector.o
  11481. obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
  11482. obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
  11483. obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
  11484. @@ -11,3 +12,5 @@
  11485. imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
  11486. obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
  11487. obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o
  11488. +obj-$(CONFIG_DRM_DW_HDMI_AUDIO) += dw-hdmi-audio.o
  11489. +obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
  11490. diff -Nur linux-3.15-rc1.orig/drivers/staging/imx-drm/parallel-display.c linux-3.15-rc1/drivers/staging/imx-drm/parallel-display.c
  11491. --- linux-3.15-rc1.orig/drivers/staging/imx-drm/parallel-display.c 2014-04-13 23:18:35.000000000 +0200
  11492. +++ linux-3.15-rc1/drivers/staging/imx-drm/parallel-display.c 2014-04-25 14:11:13.599375428 +0200
  11493. @@ -219,6 +219,8 @@
  11494. imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB565;
  11495. else if (!strcmp(fmt, "bgr666"))
  11496. imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
  11497. + else if (!strcmp(fmt, "rgb666"))
  11498. + imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB666;
  11499. }
  11500. panel_node = of_parse_phandle(np, "fsl,panel", 0);
  11501. diff -Nur linux-3.15-rc1.orig/include/linux/cec-dev.h linux-3.15-rc1/include/linux/cec-dev.h
  11502. --- linux-3.15-rc1.orig/include/linux/cec-dev.h 1970-01-01 01:00:00.000000000 +0100
  11503. +++ linux-3.15-rc1/include/linux/cec-dev.h 2014-04-25 14:11:13.599375428 +0200
  11504. @@ -0,0 +1,69 @@
  11505. +#ifndef _LINUX_CEC_DEV_H
  11506. +#define _LINUX_CEC_DEV_H
  11507. +
  11508. +#include <linux/cdev.h>
  11509. +#include <linux/list.h>
  11510. +#include <linux/mutex.h>
  11511. +#include <linux/spinlock.h>
  11512. +#include <linux/wait.h>
  11513. +
  11514. +#include <uapi/linux/cec-dev.h>
  11515. +
  11516. +struct device;
  11517. +
  11518. +struct cec_dev {
  11519. + struct cdev cdev;
  11520. + dev_t devn;
  11521. +
  11522. + struct mutex mutex;
  11523. + unsigned users;
  11524. +
  11525. + spinlock_t lock;
  11526. + wait_queue_head_t waitq;
  11527. + struct list_head events;
  11528. + u8 write_busy;
  11529. +
  11530. + u8 retries;
  11531. + u16 addresses;
  11532. + u16 physical;
  11533. +
  11534. + int (*open)(struct cec_dev *);
  11535. + void (*release)(struct cec_dev *);
  11536. + void (*send_message)(struct cec_dev *, u8 *, size_t);
  11537. + void (*set_address)(struct cec_dev *, unsigned);
  11538. +};
  11539. +
  11540. +void cec_dev_event(struct cec_dev *cec_dev, int type, u8 *msg, size_t len);
  11541. +
  11542. +static inline void cec_dev_receive(struct cec_dev *cec_dev, u8 *msg,
  11543. + unsigned len)
  11544. +{
  11545. + cec_dev_event(cec_dev, MESSAGE_TYPE_RECEIVE_SUCCESS, msg, len);
  11546. +}
  11547. +
  11548. +static inline void cec_dev_send_complete(struct cec_dev *cec_dev, int ack)
  11549. +{
  11550. + cec_dev->retries = 0;
  11551. + cec_dev->write_busy = 0;
  11552. +
  11553. + cec_dev_event(cec_dev, ack ? MESSAGE_TYPE_SEND_SUCCESS :
  11554. + MESSAGE_TYPE_NOACK, NULL, 0);
  11555. +}
  11556. +
  11557. +static inline void cec_dev_disconnect(struct cec_dev *cec_dev)
  11558. +{
  11559. + cec_dev->physical = 0;
  11560. + cec_dev_event(cec_dev, MESSAGE_TYPE_DISCONNECTED, NULL, 0);
  11561. +}
  11562. +
  11563. +static inline void cec_dev_connect(struct cec_dev *cec_dev, u32 phys)
  11564. +{
  11565. + cec_dev->physical = phys;
  11566. + cec_dev_event(cec_dev, MESSAGE_TYPE_CONNECTED, NULL, 0);
  11567. +}
  11568. +
  11569. +void cec_dev_init(struct cec_dev *cec_dev, struct module *);
  11570. +int cec_dev_add(struct cec_dev *cec_dev, struct device *, const char *name);
  11571. +void cec_dev_remove(struct cec_dev *cec_dev);
  11572. +
  11573. +#endif
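The header above is the whole in-kernel contract: a provider fills in the four callbacks, calls cec_dev_init() and cec_dev_add(), and reports cable state through the cec_dev_connect()/cec_dev_disconnect() helpers. The dw-hdmi-cec.c hunk earlier in this patch covers registration but never exercises the hotplug helpers, so the following is a minimal sketch of how an HDMI driver's plug/unplug path might use them. The function name and the way the caller obtains the sink's CEC physical address are illustrative assumptions, not part of this patch.

#include <linux/cec-dev.h>
#include <linux/types.h>

/*
 * Illustrative sketch only (not part of this patch): propagate HDMI
 * cable state to the CEC character device.  "phys" is the sink's CEC
 * physical address, which the caller is assumed to have parsed from
 * the EDID; how that parsing is done is outside this sketch.
 */
static void example_hdmi_hpd_to_cec(struct cec_dev *cec, bool plugged, u32 phys)
{
	if (plugged)
		cec_dev_connect(cec, phys);	/* sets ->physical, queues MESSAGE_TYPE_CONNECTED */
	else
		cec_dev_disconnect(cec);	/* clears ->physical, queues MESSAGE_TYPE_DISCONNECTED */
}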
  11574. diff -Nur linux-3.15-rc1.orig/include/linux/mmc/host.h linux-3.15-rc1/include/linux/mmc/host.h
  11575. --- linux-3.15-rc1.orig/include/linux/mmc/host.h 2014-04-13 23:18:35.000000000 +0200
  11576. +++ linux-3.15-rc1/include/linux/mmc/host.h 2014-04-25 14:11:13.599375428 +0200
  11577. @@ -278,6 +278,7 @@
  11578. #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
  11579. MMC_CAP2_PACKED_WR)
  11580. #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
  11581. +#define MMC_CAP2_SDIO_NOTHREAD (1 << 15)
  11582. mmc_pm_flag_t pm_caps; /* supported pm features */
  11583. @@ -293,6 +294,11 @@
  11584. unsigned long clkgate_delay;
  11585. #endif
  11586. + /* card specific properties to deal with power and reset */
  11587. + struct regulator *card_regulator; /* External VCC needed by the card */
  11588. + struct gpio_desc *card_reset_gpios[2]; /* External resets, active low */
  11589. + struct clk *card_clk; /* External clock needed by the card */
  11590. +
  11591. /* host specific block data */
  11592. unsigned int max_seg_size; /* see blk_queue_max_segment_size */
  11593. unsigned short max_segs; /* see blk_queue_max_segments */
  11594. @@ -391,6 +397,8 @@
  11595. wake_up_process(host->sdio_irq_thread);
  11596. }
  11597. +void sdio_run_irqs(struct mmc_host *host);
  11598. +
  11599. #ifdef CONFIG_REGULATOR
  11600. int mmc_regulator_get_ocrmask(struct regulator *supply);
  11601. int mmc_regulator_set_ocr(struct mmc_host *mmc,
diff -Nur linux-3.15-rc1.orig/include/linux/mmc/sdhci.h linux-3.15-rc1/include/linux/mmc/sdhci.h
--- linux-3.15-rc1.orig/include/linux/mmc/sdhci.h 2014-04-13 23:18:35.000000000 +0200
+++ linux-3.15-rc1/include/linux/mmc/sdhci.h 2014-04-25 14:11:13.599375428 +0200
@@ -57,12 +57,8 @@
 #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
 /* Controller reports inverted write-protect state */
 #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
-/* Controller has nonstandard clock management */
-#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
 /* Controller does not like fast PIO transfers */
 #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
-/* Controller losing signal/interrupt enable states after reset */
-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
 /* Controller has to be forced to use block size of 2048 bytes */
 #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
 /* Controller cannot do multi-block transfers */
@@ -147,6 +143,7 @@
	bool runtime_suspended;	/* Host is runtime suspended */
	bool bus_on;		/* Bus power prevents runtime suspend */
+	bool preset_enabled;	/* Preset is enabled */
	struct mmc_request *mrq;	/* Current request */
	struct mmc_command *cmd;	/* Current command */
@@ -164,8 +161,7 @@
	dma_addr_t adma_addr;	/* Mapped ADMA descr. table */
	dma_addr_t align_addr;	/* Mapped bounce buffer */
-	struct tasklet_struct card_tasklet;	/* Tasklet structures */
-	struct tasklet_struct finish_tasklet;
+	struct tasklet_struct finish_tasklet;	/* Tasklet structures */
	struct timer_list timer;	/* Timer for timeouts */
@@ -177,6 +173,13 @@
	unsigned int ocr_avail_mmc;
	u32 ocr_mask;		/* available voltages */
+	unsigned timing;	/* Current timing */
+
+	u32 thread_isr;
+
+	/* cached registers */
+	u32 ier;
+
	wait_queue_head_t buf_ready_int;	/* Waitqueue for Buffer Read Ready interrupt */
	unsigned int tuning_done;	/* Condition flag set when CMD19 succeeds */
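
The new host->ier field caches the interrupt-enable mask in software, which is what makes SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET unnecessary: after any controller reset the driver can simply rewrite the cached mask into the two enable registers. A hedged sketch of that idea, written as it would sit inside the sdhci driver (the helper name is illustrative, not the patch's actual code; sdhci_writel(), SDHCI_INT_ENABLE and SDHCI_SIGNAL_ENABLE are the driver's existing accessors and register offsets):

/* Illustrative only: re-arm interrupt enables from the cached mask. */
static void my_sdhci_reenable_irqs(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
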
diff -Nur linux-3.15-rc1.orig/include/uapi/linux/cec-dev.h linux-3.15-rc1/include/uapi/linux/cec-dev.h
--- linux-3.15-rc1.orig/include/uapi/linux/cec-dev.h 1970-01-01 01:00:00.000000000 +0100
+++ linux-3.15-rc1/include/uapi/linux/cec-dev.h 2014-04-25 14:11:13.599375428 +0200
@@ -0,0 +1,34 @@
+#ifndef _UAPI_LINUX_CEC_DEV_H
+#define _UAPI_LINUX_CEC_DEV_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define MAX_MESSAGE_LEN 16
+
+enum {
+	HDMICEC_IOC_MAGIC = 'H',
+	/* This is wrong: we pass the argument as a number, not a pointer */
+	HDMICEC_IOC_O_SETLOGICALADDRESS = _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char),
+	HDMICEC_IOC_SETLOGICALADDRESS = _IO(HDMICEC_IOC_MAGIC, 1),
+	HDMICEC_IOC_STARTDEVICE = _IO(HDMICEC_IOC_MAGIC, 2),
+	HDMICEC_IOC_STOPDEVICE = _IO(HDMICEC_IOC_MAGIC, 3),
+	HDMICEC_IOC_GETPHYADDRESS = _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4]),
+};
+
+enum {
+	MESSAGE_TYPE_RECEIVE_SUCCESS = 1,
+	MESSAGE_TYPE_NOACK,
+	MESSAGE_TYPE_DISCONNECTED,
+	MESSAGE_TYPE_CONNECTED,
+	MESSAGE_TYPE_SEND_SUCCESS,
+	MESSAGE_TYPE_SEND_ERROR,
+};
+
+struct cec_user_event {
+	__u32 event_type;
+	__u32 msg_len;
+	__u8 msg[MAX_MESSAGE_LEN];
+};
+
+#endif
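
For orientation, a hedged sketch of how userspace might drive this interface. The device node name is a placeholder, and the assumption that events are read back as struct cec_user_event records over read() comes from the driver side of this patch rather than anything guaranteed by the header itself:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cec-dev.h>

int main(void)
{
	struct cec_user_event ev;
	unsigned char phys[4];
	int fd = open("/dev/cec0", O_RDWR);	/* placeholder node name */

	if (fd < 0)
		return 1;

	/* Logical address is passed by value, as the header comment notes. */
	ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4);	/* 4 = playback device 1 */
	ioctl(fd, HDMICEC_IOC_STARTDEVICE);
	ioctl(fd, HDMICEC_IOC_GETPHYADDRESS, phys);

	/* Assumed: events arrive as struct cec_user_event via read(). */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.event_type == MESSAGE_TYPE_RECEIVE_SUCCESS)
			printf("CEC frame, %u bytes\n", ev.msg_len);
		else if (ev.event_type == MESSAGE_TYPE_DISCONNECTED)
			break;
	}

	ioctl(fd, HDMICEC_IOC_STOPDEVICE);
	close(fd);
	return 0;
}
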
diff -Nur linux-3.15-rc1.orig/include/uapi/linux/videodev2.h linux-3.15-rc1/include/uapi/linux/videodev2.h
--- linux-3.15-rc1.orig/include/uapi/linux/videodev2.h 2014-04-13 23:18:35.000000000 +0200
+++ linux-3.15-rc1/include/uapi/linux/videodev2.h 2014-04-25 14:11:13.599375428 +0200
@@ -299,6 +299,7 @@
 #define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
 #define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
 #define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
+#define V4L2_PIX_FMT_RGB666 v4l2_fourcc('R', 'G', 'B', 'H') /* 18 RGB-6-6-6 */
 #define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
 #define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
 #define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
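
The new fourcc mirrors the existing BGR666 entry with the components in RGB order. A short hedged example of negotiating it through the standard VIDIOC_S_FMT call; /dev/video0 and the frame size are placeholders, and whether any given driver accepts the format is up to that driver:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request the 18-bit RGB-6-6-6 format on a capture node (placeholder values). */
static int request_rgb666(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 640;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB666;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;

	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
		close(fd);
		return -1;
	}

	return fd;	/* caller may go on to request buffers and stream */
}
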
diff -Nur linux-3.15-rc1.orig/sound/soc/fsl/imx-pcm-dma.c linux-3.15-rc1/sound/soc/fsl/imx-pcm-dma.c
--- linux-3.15-rc1.orig/sound/soc/fsl/imx-pcm-dma.c 2014-04-13 23:18:35.000000000 +0200
+++ linux-3.15-rc1/sound/soc/fsl/imx-pcm-dma.c 2014-04-25 14:11:13.599375428 +0200
@@ -44,7 +44,7 @@
	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
	.period_bytes_min = 128,
	.period_bytes_max = 65535, /* Limited by SDMA engine */
-	.periods_min = 2,
+	.periods_min = 4,
	.periods_max = 255,
	.fifo_size = 0,
 };