rmk.patch 369 KB

diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts linux-3.15-rc6/arch/arm/boot/dts/imx6dl-hummingboard.dts
--- linux-3.15-rc6.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/imx6dl-hummingboard.dts 2014-05-23 11:26:48.244939835 +0200
@@ -67,6 +67,14 @@
 	status = "okay";
 };
 
+&hdmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hummingboard_hdmi>;
+	ddc-i2c-bus = <&i2c2>;
+	status = "okay";
+	crtcs = <&ipu1 0>;
+};
+
 &i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_hummingboard_i2c1>;
@@ -82,6 +90,13 @@
 	 */
 };
 
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hummingboard_i2c2>;
+	status = "okay";
+};
+
 &iomuxc {
 	hummingboard {
 		pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
@@ -97,6 +112,12 @@
 			>;
 		};
 
+		pinctrl_hummingboard_hdmi: hummingboard-hdmi {
+			fsl,pins = <
+				MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
+			>;
+		};
+
 		pinctrl_hummingboard_i2c1: hummingboard-i2c1 {
 			fsl,pins = <
 				MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
@@ -104,6 +125,13 @@
 			>;
 		};
 
+		pinctrl_hummingboard_i2c2: hummingboard-i2c2 {
+			fsl,pins = <
+				MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+				MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+			>;
+		};
+
 		pinctrl_hummingboard_spdif: hummingboard-spdif {
 			fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
 		};
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/imx6q-cubox-i.dts linux-3.15-rc6/arch/arm/boot/dts/imx6q-cubox-i.dts
--- linux-3.15-rc6.orig/arch/arm/boot/dts/imx6q-cubox-i.dts 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/imx6q-cubox-i.dts 2014-05-23 11:26:48.244939835 +0200
@@ -13,4 +13,8 @@
 
 &sata {
 	status = "okay";
+	fsl,transmit-level-mV = <1104>;
+	fsl,transmit-boost-mdB = <0>;
+	fsl,transmit-atten-16ths = <9>;
+	fsl,no-spread-spectrum;
 };
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-3.15-rc6/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
--- linux-3.15-rc6.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2014-05-23 11:26:48.244939835 +0200
@@ -12,6 +12,19 @@
 		pinctrl-0 = <&pinctrl_cubox_i_ir>;
 	};
 
+	pwmleds {
+		compatible = "pwm-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
+
+		front {
+			active-low;
+			label = "imx6:red:front";
+			max-brightness = <248>;
+			pwms = <&pwm1 0 50000>;
+		};
+	};
+
 	regulators {
 		compatible = "simple-bus";
 
@@ -55,6 +68,21 @@
 	};
 };
 
+&hdmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cubox_i_hdmi>;
+	ddc-i2c-bus = <&i2c2>;
+	status = "okay";
+	crtcs = <&ipu1 0>;
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_cubox_i_i2c2>;
+	status = "okay";
+};
+
 &i2c3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_cubox_i_i2c3>;
@@ -69,6 +97,19 @@
 
 &iomuxc {
 	cubox_i {
+		pinctrl_cubox_i_hdmi: cubox-i-hdmi {
+			fsl,pins = <
+				MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
+			>;
+		};
+
+		pinctrl_cubox_i_i2c2: cubox-i-i2c2 {
+			fsl,pins = <
+				MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+				MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+			>;
+		};
+
 		pinctrl_cubox_i_i2c3: cubox-i-i2c3 {
 			fsl,pins = <
 				MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1
@@ -82,6 +123,10 @@
 			>;
 		};
 
+		pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
+			fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
+		};
+
 		pinctrl_cubox_i_spdif: cubox-i-spdif {
 			fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
 		};
@@ -111,6 +156,28 @@
 				MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
 			>;
 		};
+
+		pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
+				MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
+				MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+				MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+				MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+				MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9
+			>;
+		};
+
+		pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
+				MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
+				MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+				MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+				MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+				MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9
+			>;
+		};
 	};
 };
 
@@ -130,9 +197,19 @@
 	status = "okay";
 };
 
+&uart4 {
+	status = "okay";
+};
+
+&usdhc1 {
+	status = "okay";
+};
+
 &usdhc2 {
-	pinctrl-names = "default";
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
+	pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>;
+	pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>;
 	vmmc-supply = <&reg_3p3v>;
 	cd-gpios = <&gpio1 4 0>;
 	status = "okay";
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/imx6qdl.dtsi linux-3.15-rc6/arch/arm/boot/dts/imx6qdl.dtsi
--- linux-3.15-rc6.orig/arch/arm/boot/dts/imx6qdl.dtsi 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/imx6qdl.dtsi 2014-05-23 11:26:48.244939835 +0200
@@ -128,6 +128,8 @@
cache-level = <2>;
arm,tag-latency = <4 2 3>;
arm,data-latency = <4 2 3>;
+ arm,dynamic-clk-gating;
+ arm,standby-mode;
};
pcie: pcie@0x01000000 {
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-3.15-rc6/arch/arm/boot/dts/imx6qdl-microsom.dtsi
--- linux-3.15-rc6.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2014-05-23 11:26:48.244939835 +0200
@@ -1,9 +1,69 @@
/*
* Copyright (C) 2013,2014 Russell King
*/
+#include <dt-bindings/gpio/gpio.h>
+/ {
+ regulators {
+ compatible = "simple-bus";
+
+ reg_brcm_osc: brcm-osc-reg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio5 5 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_microsom_brcm_osc_reg>;
+ regulator-name = "brcm_osc_reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_brcm: brcm-reg {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio3 19 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_microsom_brcm_reg>;
+ regulator-name = "brcm_reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <200000>;
+ };
+ };
+};
&iomuxc {
microsom {
+ pinctrl_microsom_brcm_osc_reg: microsom-brcm-osc-reg {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070
+ >;
+ };
+
+ pinctrl_microsom_brcm_reg: microsom-brcm-reg {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070
+ >;
+ };
+
+ pinctrl_microsom_brcm_wifi: microsom-brcm-wifi {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0
+ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070
+ MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070
+ MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070
+ >;
+ };
+
+ pinctrl_microsom_brcm_bt: microsom-brcm-bt {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070
+ MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070
+ MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070
+ >;
+ };
+
pinctrl_microsom_uart1: microsom-uart1 {
fsl,pins = <
MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
@@ -11,6 +71,15 @@
>;
};
+ pinctrl_microsom_uart4_1: microsom-uart4 {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
+ >;
+ };
+
pinctrl_microsom_usbotg: microsom-usbotg {
/*
* Similar to pinctrl_usbotg_2, but we want it
@@ -18,6 +87,17 @@
*/
fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
};
+
+ pinctrl_microsom_usdhc1: microsom-usdhc1 {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
+ >;
+ };
};
};
@@ -27,7 +107,25 @@
status = "okay";
};
+/* UART4 - Connected to optional BRCM Wifi/BT/FM */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4_1>;
+ fsl,uart-has-rtscts;
+};
+
&usbotg {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_usbotg>;
};
+
+/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */
+&usdhc1 {
+ card-external-vcc-supply = <&reg_brcm>;
+ card-reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>, <&gpio6 0 GPIO_ACTIVE_LOW>;
+ keep-power-in-suspend;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>;
+ vmmc-supply = <&reg_brcm>;
+};
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/imx6sl.dtsi linux-3.15-rc6/arch/arm/boot/dts/imx6sl.dtsi
--- linux-3.15-rc6.orig/arch/arm/boot/dts/imx6sl.dtsi 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/imx6sl.dtsi 2014-05-23 11:26:48.244939835 +0200
@@ -111,6 +111,8 @@
cache-level = <2>;
arm,tag-latency = <4 2 3>;
arm,data-latency = <4 2 3>;
+ arm,dynamic-clk-gating;
+ arm,standby-mode;
};
pmu {
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/marco.dtsi linux-3.15-rc6/arch/arm/boot/dts/marco.dtsi
--- linux-3.15-rc6.orig/arch/arm/boot/dts/marco.dtsi 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/marco.dtsi 2014-05-23 11:26:48.244939835 +0200
@@ -36,7 +36,7 @@
ranges = <0x40000000 0x40000000 0xa0000000>;
l2-cache-controller@c0030000 {
- compatible = "sirf,marco-pl310-cache", "arm,pl310-cache";
+ compatible = "arm,pl310-cache";
reg = <0xc0030000 0x1000>;
interrupts = <0 59 0>;
arm,tag-latency = <1 1 1>;
diff -Nur linux-3.15-rc6.orig/arch/arm/boot/dts/prima2.dtsi linux-3.15-rc6/arch/arm/boot/dts/prima2.dtsi
--- linux-3.15-rc6.orig/arch/arm/boot/dts/prima2.dtsi 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/boot/dts/prima2.dtsi 2014-05-23 11:26:48.244939835 +0200
@@ -48,7 +48,7 @@
ranges = <0x40000000 0x40000000 0x80000000>;
l2-cache-controller@80040000 {
- compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache";
+ compatible = "arm,pl310-cache";
reg = <0x80040000 0x1000>;
interrupts = <59>;
arm,tag-latency = <1 1 1>;
diff -Nur linux-3.15-rc6.orig/arch/arm/configs/imx_v6_v7_defconfig linux-3.15-rc6/arch/arm/configs/imx_v6_v7_defconfig
--- linux-3.15-rc6.orig/arch/arm/configs/imx_v6_v7_defconfig 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/configs/imx_v6_v7_defconfig 2014-05-23 11:26:48.248939848 +0200
@@ -245,6 +245,7 @@
CONFIG_DRM_IMX_LDB=y
CONFIG_DRM_IMX_IPUV3_CORE=y
CONFIG_DRM_IMX_IPUV3=y
+CONFIG_DRM_IMX_HDMI=y
CONFIG_COMMON_CLK_DEBUG=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PWM=y
diff -Nur linux-3.15-rc6.orig/arch/arm/include/asm/hardware/cache-l2x0.h linux-3.15-rc6/arch/arm/include/asm/hardware/cache-l2x0.h
--- linux-3.15-rc6.orig/arch/arm/include/asm/hardware/cache-l2x0.h 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/include/asm/hardware/cache-l2x0.h 2014-05-23 11:26:48.248939848 +0200
@@ -26,8 +26,8 @@
#define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104
-#define L2X0_TAG_LATENCY_CTRL 0x108
-#define L2X0_DATA_LATENCY_CTRL 0x10C
+#define L310_TAG_LATENCY_CTRL 0x108
+#define L310_DATA_LATENCY_CTRL 0x10C
#define L2X0_EVENT_CNT_CTRL 0x200
#define L2X0_EVENT_CNT1_CFG 0x204
#define L2X0_EVENT_CNT0_CFG 0x208
@@ -54,53 +54,93 @@
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
-#define L2X0_ADDR_FILTER_START 0xC00
-#define L2X0_ADDR_FILTER_END 0xC04
+#define L310_ADDR_FILTER_START 0xC00
+#define L310_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40
-#define L2X0_PREFETCH_CTRL 0xF60
-#define L2X0_POWER_CTRL 0xF80
-#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1)
-#define L2X0_STNDBY_MODE_EN (1 << 0)
+#define L310_PREFETCH_CTRL 0xF60
+#define L310_POWER_CTRL 0xF80
+#define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
+#define L310_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
+#define L2X0_CACHE_ID_PART_L220 (2 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f
-#define L2X0_CACHE_ID_RTL_R0P0 0x0
-#define L2X0_CACHE_ID_RTL_R1P0 0x2
-#define L2X0_CACHE_ID_RTL_R2P0 0x4
-#define L2X0_CACHE_ID_RTL_R3P0 0x5
-#define L2X0_CACHE_ID_RTL_R3P1 0x6
-#define L2X0_CACHE_ID_RTL_R3P2 0x8
-
-#define L2X0_AUX_CTRL_MASK 0xc0000fff
+#define L210_CACHE_ID_RTL_R0P2_02 0x00
+#define L210_CACHE_ID_RTL_R0P1 0x01
+#define L210_CACHE_ID_RTL_R0P2_01 0x02
+#define L210_CACHE_ID_RTL_R0P3 0x03
+#define L210_CACHE_ID_RTL_R0P4 0x0b
+#define L210_CACHE_ID_RTL_R0P5 0x0f
+#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
+#define L310_CACHE_ID_RTL_R0P0 0x00
+#define L310_CACHE_ID_RTL_R1P0 0x02
+#define L310_CACHE_ID_RTL_R2P0 0x04
+#define L310_CACHE_ID_RTL_R3P0 0x05
+#define L310_CACHE_ID_RTL_R3P1 0x06
+#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
+#define L310_CACHE_ID_RTL_R3P2 0x08
+#define L310_CACHE_ID_RTL_R3P3 0x09
+
+/* L2C auxiliary control register - bits common to L2C-210/220/310 */
+#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
+#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
+#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
+#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
+#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
+#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
+/* L2C-210/220 common bits */
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
-#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
+#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
-#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
+#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
-#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
+#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
-#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
-#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
-#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
-#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
-#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
-#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
-#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
-#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
-#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
-
-#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
-#define L2X0_LATENCY_CTRL_RD_SHIFT 4
-#define L2X0_LATENCY_CTRL_WR_SHIFT 8
-
-#define L2X0_ADDR_FILTER_EN 1
+#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
+#define L2X0_AUX_CTRL_ASSOC_SHIFT 13
+#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
+/* L2C-210 specific bits */
+#define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
+#define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
+#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
+/* L2C-220 specific bits */
+#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L220_AUX_CTRL_FWA_SHIFT 23
+#define L220_AUX_CTRL_FWA_MASK (3 << 23)
+#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
+/* L2C-310 specific bits */
+#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
+#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
+#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
+#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
+#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
+#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
+#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
+#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
+#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
+#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
+
+#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
+#define L310_LATENCY_CTRL_RD(n) ((n) << 4)
+#define L310_LATENCY_CTRL_WR(n) ((n) << 8)
+
+#define L310_ADDR_FILTER_EN 1
+
+#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
+#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
+#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
+#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
+#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
#define L2X0_CTRL_EN 1
diff -Nur linux-3.15-rc6.orig/arch/arm/include/asm/outercache.h linux-3.15-rc6/arch/arm/include/asm/outercache.h
--- linux-3.15-rc6.orig/arch/arm/include/asm/outercache.h 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/include/asm/outercache.h 2014-05-23 11:26:48.248939848 +0200
@@ -21,6 +21,7 @@
#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H
+#include <linux/bug.h>
#include <linux/types.h>
struct outer_cache_fns {
@@ -28,53 +29,84 @@
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
void (*flush_all)(void);
- void (*inv_all)(void);
void (*disable)(void);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
- void (*set_debug)(unsigned long);
void (*resume)(void);
+
+ /* This is an ARM L2C thing */
+ void (*write_sec)(unsigned long, unsigned);
};
extern struct outer_cache_fns outer_cache;
#ifdef CONFIG_OUTER_CACHE
-
+/**
+ * outer_inv_range - invalidate range of outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
+
+/**
+ * outer_clean_range - clean dirty outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
+
+/**
+ * outer_flush_range - clean and invalidate outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
+/**
+ * outer_flush_all - clean and invalidate all cache lines in the outer cache
+ *
+ * Note: depending on implementation, this may not be atomic - it must
+ * only be called with interrupts disabled and no other active outer
+ * cache masters.
+ *
+ * It is intended that this function is only used by implementations
+ * needing to override the outer_cache.disable() method due to security.
+ * (Some implementations perform this as a clean followed by an invalidate.)
+ */
static inline void outer_flush_all(void)
{
if (outer_cache.flush_all)
outer_cache.flush_all();
}
-static inline void outer_inv_all(void)
-{
- if (outer_cache.inv_all)
- outer_cache.inv_all();
-}
-
-static inline void outer_disable(void)
-{
- if (outer_cache.disable)
- outer_cache.disable();
-}
-
+/**
+ * outer_disable - clean, invalidate and disable the outer cache
+ *
+ * Disable the outer cache, ensuring that any data contained in the outer
+ * cache is pushed out to lower levels of system memory. The note and
+ * conditions above concerning outer_flush_all() applies here.
+ */
+extern void outer_disable(void);
+
+/**
+ * outer_resume - restore the cache configuration and re-enable outer cache
+ *
+ * Restore any configuration that the cache had when previously enabled,
+ * and re-enable the outer cache.
+ */
static inline void outer_resume(void)
{
if (outer_cache.resume)
@@ -90,13 +122,18 @@
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_flush_all(void) { }
-static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { }
static inline void outer_resume(void) { }
#endif
#ifdef CONFIG_OUTER_CACHE_SYNC
+/**
+ * outer_sync - perform a sync point for outer cache
+ *
+ * Ensure that all outer cache operations are complete and any store
+ * buffers are drained.
+ */
static inline void outer_sync(void)
{
if (outer_cache.sync)
diff -Nur linux-3.15-rc6.orig/arch/arm/Kconfig linux-3.15-rc6/arch/arm/Kconfig
--- linux-3.15-rc6.orig/arch/arm/Kconfig 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/Kconfig 2014-05-23 11:26:48.248939848 +0200
@@ -1230,19 +1230,6 @@
register of the Cortex-A9 which reduces the linefill issuing
capabilities of the processor.
-config PL310_ERRATA_588369
- bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
- depends on CACHE_L2X0
- help
- The PL310 L2 cache controller implements three types of Clean &
- Invalidate maintenance operations: by Physical Address
- (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
- They are architecturally defined to behave as the execution of a
- clean operation followed immediately by an invalidate operation,
- both performing to the same memory location. This functionality
- is not correctly implemented in PL310 as clean lines are not
- invalidated as a result of these operations.
-
config ARM_ERRATA_643719
bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
depends on CPU_V7 && SMP
@@ -1265,17 +1252,6 @@
tables. The workaround changes the TLB flushing routines to invalidate
entries regardless of the ASID.
-config PL310_ERRATA_727915
- bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
- depends on CACHE_L2X0
- help
- PL310 implements the Clean & Invalidate by Way L2 cache maintenance
- operation (offset 0x7FC). This operation runs in background so that
- PL310 can handle normal accesses while it is in progress. Under very
- rare circumstances, due to this erratum, write data can be lost when
- PL310 treats a cacheable write transaction during a Clean &
- Invalidate by Way operation.
-
config ARM_ERRATA_743622
bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
depends on CPU_V7
@@ -1301,21 +1277,6 @@
operation is received by a CPU before the ICIALLUIS has completed,
potentially leading to corrupted entries in the cache or TLB.
-config PL310_ERRATA_753970
- bool "PL310 errata: cache sync operation may be faulty"
- depends on CACHE_PL310
- help
- This option enables the workaround for the 753970 PL310 (r3p0) erratum.
-
- Under some condition the effect of cache sync operation on
- the store buffer still remains when the operation completes.
- This means that the store buffer is always asked to drain and
- this prevents it from merging any further writes. The workaround
- is to replace the normal offset of cache sync operation (0x730)
- by another offset targeting an unmapped PL310 register 0x740.
- This has the same effect as the cache sync operation: store buffer
- drain and waiting for all buffers empty.
-
config ARM_ERRATA_754322
bool "ARM errata: possible faulty MMU translations following an ASID switch"
depends on CPU_V7
@@ -1364,18 +1325,6 @@
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
-config PL310_ERRATA_769419
- bool "PL310 errata: no automatic Store Buffer drain"
- depends on CACHE_L2X0
- help
- On revisions of the PL310 prior to r3p2, the Store Buffer does
- not automatically drain. This can cause normal, non-cacheable
- writes to be retained when the memory system is idle, leading
- to suboptimal I/O performance for drivers using coherent DMA.
- This option adds a write barrier to the cpu_idle loop so that,
- on systems with an outer cache, the store buffer is drained
- explicitly.
-
config ARM_ERRATA_775420
bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
depends on CPU_V7
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-berlin/berlin.c linux-3.15-rc6/arch/arm/mach-berlin/berlin.c
--- linux-3.15-rc6.orig/arch/arm/mach-berlin/berlin.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-berlin/berlin.c 2014-05-23 11:26:48.256939874 +0200
@@ -24,7 +24,7 @@
* with DT probing for L2CCs, berlin_init_machine can be removed.
* Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
*/
- l2x0_of_init(0x70c00000, 0xfeffffff);
+ l2x0_of_init(0x30c00000, 0xfeffffff);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-cns3xxx/core.c linux-3.15-rc6/arch/arm/mach-cns3xxx/core.c
--- linux-3.15-rc6.orig/arch/arm/mach-cns3xxx/core.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-cns3xxx/core.c 2014-05-23 11:26:48.256939874 +0200
@@ -272,9 +272,9 @@
*
* 1 cycle of latency for setup, read and write accesses
*/
- val = readl(base + L2X0_TAG_LATENCY_CTRL);
+ val = readl(base + L310_TAG_LATENCY_CTRL);
val &= 0xfffff888;
- writel(val, base + L2X0_TAG_LATENCY_CTRL);
+ writel(val, base + L310_TAG_LATENCY_CTRL);
/*
* Data RAM Control register
@@ -285,12 +285,12 @@
*
* 1 cycle of latency for setup, read and write accesses
*/
- val = readl(base + L2X0_DATA_LATENCY_CTRL);
+ val = readl(base + L310_DATA_LATENCY_CTRL);
val &= 0xfffff888;
- writel(val, base + L2X0_DATA_LATENCY_CTRL);
+ writel(val, base + L310_DATA_LATENCY_CTRL);
/* 32 KiB, 8-way, parity disable */
- l2x0_init(base, 0x00540000, 0xfe000fff);
+ l2x0_init(base, 0x00500000, 0xfe0f0fff);
}
#endif /* CONFIG_CACHE_L2X0 */
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-exynos/common.h linux-3.15-rc6/arch/arm/mach-exynos/common.h
--- linux-3.15-rc6.orig/arch/arm/mach-exynos/common.h 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-exynos/common.h 2014-05-23 11:26:48.256939874 +0200
@@ -55,7 +55,6 @@
NUM_SYS_POWERDOWN,
};
-extern unsigned long l2x0_regs_phys;
struct exynos_pmu_conf {
void __iomem *reg;
unsigned int val[NUM_SYS_POWERDOWN];
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-exynos/exynos.c linux-3.15-rc6/arch/arm/mach-exynos/exynos.c
--- linux-3.15-rc6.orig/arch/arm/mach-exynos/exynos.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-exynos/exynos.c 2014-05-23 11:26:48.256939874 +0200
@@ -32,9 +32,6 @@
#include "mfc.h"
#include "regs-pmu.h"
-#define L2_AUX_VAL 0x7C470001
-#define L2_AUX_MASK 0xC200ffff
-
static struct map_desc exynos4_iodesc[] __initdata = {
{
.virtual = (unsigned long)S3C_VA_SYS,
@@ -321,17 +318,7 @@
static int __init exynos4_l2x0_cache_init(void)
{
- int ret;
-
- ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
- if (ret)
- return ret;
-
- if (IS_ENABLED(CONFIG_S5P_SLEEP)) {
- l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
- clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
- }
- return 0;
+ return l2x0_of_init(0x3c400001, 0xc20fffff);
}
early_initcall(exynos4_l2x0_cache_init);
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-exynos/sleep.S linux-3.15-rc6/arch/arm/mach-exynos/sleep.S
--- linux-3.15-rc6.orig/arch/arm/mach-exynos/sleep.S 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-exynos/sleep.S 2014-05-23 11:26:48.256939874 +0200
@@ -16,8 +16,6 @@
*/
#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/hardware/cache-l2x0.h>
#define CPU_MASK 0xff0ffff0
#define CPU_CORTEX_A9 0x410fc090
@@ -53,33 +51,7 @@
and r0, r0, r1
ldr r1, =CPU_CORTEX_A9
cmp r0, r1
- bne skip_l2_resume
- adr r0, l2x0_regs_phys
- ldr r0, [r0]
- cmp r0, #0
- beq skip_l2_resume
- ldr r1, [r0, #L2X0_R_PHY_BASE]
- ldr r2, [r1, #L2X0_CTRL]
- tst r2, #0x1
- bne skip_l2_resume
- ldr r2, [r0, #L2X0_R_AUX_CTRL]
- str r2, [r1, #L2X0_AUX_CTRL]
- ldr r2, [r0, #L2X0_R_TAG_LATENCY]
- str r2, [r1, #L2X0_TAG_LATENCY_CTRL]
- ldr r2, [r0, #L2X0_R_DATA_LATENCY]
- str r2, [r1, #L2X0_DATA_LATENCY_CTRL]
- ldr r2, [r0, #L2X0_R_PREFETCH_CTRL]
- str r2, [r1, #L2X0_PREFETCH_CTRL]
- ldr r2, [r0, #L2X0_R_PWR_CTRL]
- str r2, [r1, #L2X0_POWER_CTRL]
- mov r2, #1
- str r2, [r1, #L2X0_CTRL]
-skip_l2_resume:
+ bleq l2c310_early_resume
#endif
b cpu_resume
ENDPROC(exynos_cpu_resume)
-#ifdef CONFIG_CACHE_L2X0
- .globl l2x0_regs_phys
-l2x0_regs_phys:
- .long 0
-#endif
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-highbank/highbank.c linux-3.15-rc6/arch/arm/mach-highbank/highbank.c
--- linux-3.15-rc6.orig/arch/arm/mach-highbank/highbank.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-highbank/highbank.c 2014-05-23 11:26:48.256939874 +0200
@@ -51,11 +51,13 @@
}
-static void highbank_l2x0_disable(void)
+static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
{
- outer_flush_all();
- /* Disable PL310 L2 Cache controller */
- highbank_smc1(0x102, 0x0);
+ if (reg == L2X0_CTRL)
+ highbank_smc1(0x102, val);
+ else
+ WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
+ reg);
}
static void __init highbank_init_irq(void)
@@ -66,11 +68,9 @@
highbank_scu_map_io();
/* Enable PL310 L2 Cache controller */
- if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
- of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
- highbank_smc1(0x102, 0x1);
- l2x0_of_init(0, ~0UL);
- outer_cache.disable = highbank_l2x0_disable;
+ if (IS_ENABLED(CONFIG_CACHE_L2X0)) {
+ outer_cache.write_sec = highbank_l2c310_write_sec;
+ l2x0_of_init(0, ~0);
}
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-imx/clk-pllv3.c linux-3.15-rc6/arch/arm/mach-imx/clk-pllv3.c
--- linux-3.15-rc6.orig/arch/arm/mach-imx/clk-pllv3.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-imx/clk-pllv3.c 2014-05-23 11:26:48.256939874 +0200
@@ -273,9 +273,10 @@
struct clk_pllv3 *pll = to_clk_pllv3(hw);
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
- u32 val, div;
+ u32 val, newval, div;
u32 mfn, mfd = 1000000;
s64 temp64;
+ int ret;
if (rate < min_rate || rate > max_rate)
return -EINVAL;
@@ -287,13 +288,27 @@
mfn = temp64;
val = readl_relaxed(pll->base);
- val &= ~pll->div_mask;
- val |= div;
- writel_relaxed(val, pll->base);
+
+ /* set the PLL into bypass mode */
+ newval = val | BM_PLL_BYPASS;
+ writel_relaxed(newval, pll->base);
+
+ /* configure the new frequency */
+ newval &= ~pll->div_mask;
+ newval |= div;
+ writel_relaxed(newval, pll->base);
writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+ writel(mfd, pll->base + PLL_DENOM_OFFSET);
+
+ ret = clk_pllv3_wait_lock(pll);
+ if (ret == 0 && val & BM_PLL_POWER) {
+ /* only if it locked can we switch back to the PLL */
+ newval &= ~BM_PLL_BYPASS;
+ newval |= val & BM_PLL_BYPASS;
+ writel(newval, pll->base);
+ }
- return clk_pllv3_wait_lock(pll);
+ return ret;
}
static const struct clk_ops clk_pllv3_av_ops = {
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-imx/mach-vf610.c linux-3.15-rc6/arch/arm/mach-imx/mach-vf610.c
--- linux-3.15-rc6.orig/arch/arm/mach-imx/mach-vf610.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-imx/mach-vf610.c 2014-05-23 11:26:48.256939874 +0200
@@ -22,7 +22,7 @@
static void __init vf610_init_irq(void)
{
- l2x0_of_init(0, ~0UL);
+ l2x0_of_init(0, ~0);
irqchip_init();
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-imx/suspend-imx6.S linux-3.15-rc6/arch/arm/mach-imx/suspend-imx6.S
--- linux-3.15-rc6.orig/arch/arm/mach-imx/suspend-imx6.S 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-imx/suspend-imx6.S 2014-05-23 11:26:48.256939874 +0200
@@ -334,28 +334,10 @@
* turned into relative ones.
*/
-#ifdef CONFIG_CACHE_L2X0
- .macro pl310_resume
- adr r0, l2x0_saved_regs_offset
- ldr r2, [r0]
- add r2, r2, r0
- ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
- ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
- str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
- mov r1, #0x1
- str r1, [r0, #L2X0_CTRL] @ re-enable L2
- .endm
-
-l2x0_saved_regs_offset:
- .word l2x0_saved_regs - .
-
-#else
- .macro pl310_resume
- .endm
-#endif
-
ENTRY(v7_cpu_resume)
bl v7_invalidate_l1
- pl310_resume
+#ifdef CONFIG_CACHE_L2X0
+ bl l2c310_early_resume
+#endif
b cpu_resume
ENDPROC(v7_cpu_resume)
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-imx/system.c linux-3.15-rc6/arch/arm/mach-imx/system.c
--- linux-3.15-rc6.orig/arch/arm/mach-imx/system.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-imx/system.c 2014-05-23 11:26:48.260939887 +0200
@@ -124,7 +124,7 @@
}
/* Configure the L2 PREFETCH and POWER registers */
- val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
+ val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
val |= 0x70800000;
/*
* The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
@@ -137,14 +137,12 @@
*/
if (cpu_is_imx6q())
val &= ~(1 << 30 | 1 << 23);
- writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
- val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
- writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
+ writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
iounmap(l2x0_base);
of_node_put(np);
out:
- l2x0_of_init(0, ~0UL);
+ l2x0_of_init(0, ~0);
}
#endif
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-mvebu/board-v7.c linux-3.15-rc6/arch/arm/mach-mvebu/board-v7.c
--- linux-3.15-rc6.orig/arch/arm/mach-mvebu/board-v7.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-mvebu/board-v7.c 2014-05-23 11:26:48.260939887 +0200
@@ -60,7 +60,7 @@
coherency_init();
BUG_ON(mvebu_mbus_dt_init());
#ifdef CONFIG_CACHE_L2X0
- l2x0_of_init(0, ~0UL);
+ l2x0_of_init(0, ~0);
#endif
if (of_machine_is_compatible("marvell,armada375"))
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-nomadik/cpu-8815.c linux-3.15-rc6/arch/arm/mach-nomadik/cpu-8815.c
--- linux-3.15-rc6.orig/arch/arm/mach-nomadik/cpu-8815.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-nomadik/cpu-8815.c 2014-05-23 11:26:48.260939887 +0200
@@ -147,7 +147,7 @@
{
#ifdef CONFIG_CACHE_L2X0
/* At full speed latency must be >=2, so 0x249 in low bits */
- l2x0_of_init(0x00730249, 0xfe000fff);
+ l2x0_of_init(0x00700249, 0xfe0fefff);
#endif
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-omap2/common.h linux-3.15-rc6/arch/arm/mach-omap2/common.h
--- linux-3.15-rc6.orig/arch/arm/mach-omap2/common.h 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-omap2/common.h 2014-05-23 11:26:48.260939887 +0200
@@ -91,6 +91,7 @@
extern void omap3_secure_sync32k_timer_init(void);
extern void omap3_gptimer_timer_init(void);
extern void omap4_local_timer_init(void);
+int omap_l2_cache_init(void);
extern void omap5_realtime_timer_init(void);
void omap2420_init_early(void);
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-omap2/io.c linux-3.15-rc6/arch/arm/mach-omap2/io.c
--- linux-3.15-rc6.orig/arch/arm/mach-omap2/io.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-omap2/io.c 2014-05-23 11:26:48.260939887 +0200
@@ -609,6 +609,7 @@
am43xx_clockdomains_init();
am43xx_hwmod_init();
omap_hwmod_init_postsetup();
+ omap_l2_cache_init();
omap_clk_soc_init = am43xx_dt_clk_init;
}
@@ -640,6 +641,7 @@
omap44xx_clockdomains_init();
omap44xx_hwmod_init();
omap_hwmod_init_postsetup();
+ omap_l2_cache_init();
omap_clk_soc_init = omap4xxx_dt_clk_init;
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-omap2/Kconfig linux-3.15-rc6/arch/arm/mach-omap2/Kconfig
--- linux-3.15-rc6.orig/arch/arm/mach-omap2/Kconfig 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-omap2/Kconfig 2014-05-23 11:26:48.260939887 +0200
@@ -65,6 +65,7 @@
select ARCH_HAS_OPP
select ARM_GIC
select MACH_OMAP_GENERIC
+ select MIGHT_HAVE_CACHE_L2X0
config SOC_DRA7XX
bool "TI DRA7XX"
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-omap2/omap4-common.c linux-3.15-rc6/arch/arm/mach-omap2/omap4-common.c
--- linux-3.15-rc6.orig/arch/arm/mach-omap2/omap4-common.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-omap2/omap4-common.c 2014-05-23 11:26:48.260939887 +0200
@@ -167,75 +167,57 @@
return l2cache_base;
}
-static void omap4_l2x0_disable(void)
+static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
{
- outer_flush_all();
- /* Disable PL310 L2 Cache controller */
- omap_smc1(0x102, 0x0);
-}
+ unsigned smc_op;
-static void omap4_l2x0_set_debug(unsigned long val)
-{
- /* Program PL310 L2 Cache controller debug register */
- omap_smc1(0x100, val);
+ switch (reg) {
+ case L2X0_CTRL:
+ smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
+ break;
+
+ case L2X0_AUX_CTRL:
+ smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
+ break;
+
+ case L2X0_DEBUG_CTRL:
+ smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
+ break;
+
+ case L310_PREFETCH_CTRL:
+ smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
+ break;
+
+ default:
+ WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
+ return;
+ }
+
+ omap_smc1(smc_op, val);
}
-static int __init omap_l2_cache_init(void)
+int __init omap_l2_cache_init(void)
{
- u32 aux_ctrl = 0;
-
- /*
- * To avoid code running on other OMAPs in
- * multi-omap builds
- */
- if (!cpu_is_omap44xx())
- return -ENODEV;
+ u32 aux_ctrl;
/* Static mapping, never released */
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
if (WARN_ON(!l2cache_base))
return -ENOMEM;
- /*
- * 16-way associativity, parity disabled
- * Way size - 32KB (es1.0)
- * Way size - 64KB (es2.0 +)
- */
- aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
- (0x1 << 25) |
- (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
- (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
-
- if (omap_rev() == OMAP4430_REV_ES1_0) {
- aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
- } else {
- aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
- (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
- (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
- (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
- (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
- }
- if (omap_rev() != OMAP4430_REV_ES1_0)
- omap_smc1(0x109, aux_ctrl);
-
- /* Enable PL310 L2 Cache controller */
- omap_smc1(0x102, 0x1);
+ /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
+ aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
+ L310_AUX_CTRL_DATA_PREFETCH |
+ L310_AUX_CTRL_INSTR_PREFETCH;
+ outer_cache.write_sec = omap4_l2c310_write_sec;
if (of_have_populated_dt())
- l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
+ l2x0_of_init(aux_ctrl, 0xcf9fffff);
else
- l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
-
- /*
- * Override default outer_cache.disable with a OMAP4
- * specific one
- */
- outer_cache.disable = omap4_l2x0_disable;
- outer_cache.set_debug = omap4_l2x0_set_debug;
+ l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
return 0;
}
-omap_early_initcall(omap_l2_cache_init);
#endif
void __iomem *omap4_get_sar_ram_base(void)
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c linux-3.15-rc6/arch/arm/mach-omap2/omap-mpuss-lowpower.c
--- linux-3.15-rc6.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2014-05-23 11:26:48.268939913 +0200
@@ -187,19 +187,15 @@
* in every restore MPUSS OFF path.
*/
#ifdef CONFIG_CACHE_L2X0
-static void save_l2x0_context(void)
+static void __init save_l2x0_context(void)
{
- u32 val;
- void __iomem *l2x0_base = omap4_get_l2cache_base();
- if (l2x0_base) {
- val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
- __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
- val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
- __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
- }
+ __raw_writel(l2x0_saved_regs.aux_ctrl,
+ sar_base + L2X0_AUXCTRL_OFFSET);
+ __raw_writel(l2x0_saved_regs.prefetch_ctrl,
+ sar_base + L2X0_PREFETCH_CTRL_OFFSET);
}
#else
-static void save_l2x0_context(void)
+static void __init save_l2x0_context(void)
{}
#endif
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-prima2/l2x0.c linux-3.15-rc6/arch/arm/mach-prima2/l2x0.c
--- linux-3.15-rc6.orig/arch/arm/mach-prima2/l2x0.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-prima2/l2x0.c 2014-05-23 11:26:48.268939913 +0200
@@ -8,42 +8,10 @@
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/of.h>
#include <asm/hardware/cache-l2x0.h>
-struct l2x0_aux {
- u32 val;
- u32 mask;
-};
-
-static const struct l2x0_aux prima2_l2x0_aux __initconst = {
- .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT,
- .mask = 0,
-};
-
-static const struct l2x0_aux marco_l2x0_aux __initconst = {
- .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
- (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT),
- .mask = L2X0_AUX_CTRL_MASK,
-};
-
-static const struct of_device_id sirf_l2x0_ids[] __initconst = {
- { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, },
- { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, },
- {},
-};
-
static int __init sirfsoc_l2x0_init(void)
{
- struct device_node *np;
- const struct l2x0_aux *aux;
-
- np = of_find_matching_node(NULL, sirf_l2x0_ids);
- if (np) {
- aux = of_match_node(sirf_l2x0_ids, np)->data;
- return l2x0_of_init(aux->val, aux->mask);
- }
-
- return 0;
+ return l2x0_of_init(0, ~0);
}
early_initcall(sirfsoc_l2x0_init);
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-prima2/pm.c linux-3.15-rc6/arch/arm/mach-prima2/pm.c
--- linux-3.15-rc6.orig/arch/arm/mach-prima2/pm.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-prima2/pm.c 2014-05-23 11:26:48.268939913 +0200
@@ -71,7 +71,6 @@
case PM_SUSPEND_MEM:
sirfsoc_pre_suspend_power_off();
- outer_flush_all();
outer_disable();
/* go zzz */
cpu_suspend(0, sirfsoc_finish_suspend);
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-realview/realview_eb.c linux-3.15-rc6/arch/arm/mach-realview/realview_eb.c
--- linux-3.15-rc6.orig/arch/arm/mach-realview/realview_eb.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-realview/realview_eb.c 2014-05-23 11:26:48.268939913 +0200
@@ -442,8 +442,13 @@
realview_eb11mp_fixup();
#ifdef CONFIG_CACHE_L2X0
- /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
- * Bits: .... ...0 0111 1001 0000 .... .... .... */
+ /*
+ * The PL220 needs to be manually configured as the hardware
+ * doesn't report the correct sizes.
+ * 1MB (128KB/way), 8-way associativity, event monitor and
+ * parity enabled, ignore share bit, no force write allocate
+ * Bits: .... ...0 0111 1001 0000 .... .... ....
+ */
l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif
platform_device_register(&pmu_device);
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-realview/realview_pb1176.c linux-3.15-rc6/arch/arm/mach-realview/realview_pb1176.c
--- linux-3.15-rc6.orig/arch/arm/mach-realview/realview_pb1176.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-realview/realview_pb1176.c 2014-05-23 11:26:48.268939913 +0200
@@ -355,7 +355,13 @@
int i;
#ifdef CONFIG_CACHE_L2X0
- /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */
+ /*
+ * The PL220 needs to be manually configured as the hardware
+ * doesn't report the correct sizes.
+ * 128kB (16kB/way), 8-way associativity, event monitor and
+ * parity enabled, ignore share bit, no force write allocate
+ * Bits: .... ...0 0111 0011 0000 .... .... ....
+ */
l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
#endif
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-realview/realview_pb11mp.c linux-3.15-rc6/arch/arm/mach-realview/realview_pb11mp.c
--- linux-3.15-rc6.orig/arch/arm/mach-realview/realview_pb11mp.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-realview/realview_pb11mp.c 2014-05-23 11:26:48.268939913 +0200
@@ -337,8 +337,13 @@
int i;
#ifdef CONFIG_CACHE_L2X0
- /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled
- * Bits: .... ...0 0111 1001 0000 .... .... .... */
+ /*
+ * The PL220 needs to be manually configured as the hardware
+ * doesn't report the correct sizes.
+ * 1MB (128KB/way), 8-way associativity, event monitor and
+ * parity enabled, ignore share bit, no force write allocate
+ * Bits: .... ...0 0111 1001 0000 .... .... ....
+ */
l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-realview/realview_pbx.c linux-3.15-rc6/arch/arm/mach-realview/realview_pbx.c
--- linux-3.15-rc6.orig/arch/arm/mach-realview/realview_pbx.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-realview/realview_pbx.c 2014-05-23 11:26:48.268939913 +0200
@@ -370,8 +370,8 @@
__io_address(REALVIEW_PBX_TILE_L220_BASE);
/* set RAM latencies to 1 cycle for eASIC */
- writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
- writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
+ writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
+ writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
/* 16KB way size, 8-way associativity, parity disabled
* Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-rockchip/rockchip.c linux-3.15-rc6/arch/arm/mach-rockchip/rockchip.c
--- linux-3.15-rc6.orig/arch/arm/mach-rockchip/rockchip.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-rockchip/rockchip.c 2014-05-23 11:26:48.268939913 +0200
@@ -26,7 +26,7 @@
static void __init rockchip_dt_init(void)
{
- l2x0_of_init(0, ~0UL);
+ l2x0_of_init(0, ~0);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-armadillo800eva.c linux-3.15-rc6/arch/arm/mach-shmobile/board-armadillo800eva.c
--- linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-armadillo800eva.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-shmobile/board-armadillo800eva.c 2014-05-23 11:26:48.272939927 +0200
@@ -1271,8 +1271,8 @@
#ifdef CONFIG_CACHE_L2X0
- /* Early BRESP enable, Shared attribute override enable, 32K*8way */
- l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
+ /* Shared attribute override enable, 32K*8way */
+ l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
#endif
i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c linux-3.15-rc6/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
--- linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2014-05-23 11:26:48.272939927 +0200
@@ -164,8 +164,8 @@
r8a7740_meram_workaround();
#ifdef CONFIG_CACHE_L2X0
- /* Early BRESP enable, Shared attribute override enable, 32K*8way */
- l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
+ /* Shared attribute override enable, 32K*8way */
+ l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
#endif
r8a7740_add_standard_devices_dt();
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-kzm9g.c linux-3.15-rc6/arch/arm/mach-shmobile/board-kzm9g.c
--- linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-kzm9g.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-shmobile/board-kzm9g.c 2014-05-23 11:26:48.272939927 +0200
@@ -876,8 +876,8 @@
gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
#ifdef CONFIG_CACHE_L2X0
- /* Early BRESP enable, Shared attribute override enable, 64K*8way */
- l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
+ /* Shared attribute override enable, 64K*8way */
+ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
#endif
i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c linux-3.15-rc6/arch/arm/mach-shmobile/board-kzm9g-reference.c
--- linux-3.15-rc6.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-shmobile/board-kzm9g-reference.c 2014-05-23 11:26:48.272939927 +0200
@@ -36,8 +36,8 @@
sh73a0_add_standard_devices_dt();
#ifdef CONFIG_CACHE_L2X0
- /* Early BRESP enable, Shared attribute override enable, 64K*8way */
- l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
+ /* Shared attribute override enable, 64K*8way */
+ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
#endif
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-shmobile/setup-r8a7778.c linux-3.15-rc6/arch/arm/mach-shmobile/setup-r8a7778.c
--- linux-3.15-rc6.orig/arch/arm/mach-shmobile/setup-r8a7778.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-shmobile/setup-r8a7778.c 2014-05-23 11:26:48.272939927 +0200
@@ -298,10 +298,10 @@
void __iomem *base = ioremap_nocache(0xf0100000, 0x1000);
if (base) {
/*
- * Early BRESP enable, Shared attribute override enable, 64K*16way
+ * Shared attribute override enable, 64K*16way
* don't call iounmap(base)
*/
- l2x0_init(base, 0x40470000, 0x82000fff);
+ l2x0_init(base, 0x00400000, 0xc20f0fff);
}
#endif
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-shmobile/setup-r8a7779.c linux-3.15-rc6/arch/arm/mach-shmobile/setup-r8a7779.c
--- linux-3.15-rc6.orig/arch/arm/mach-shmobile/setup-r8a7779.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-shmobile/setup-r8a7779.c 2014-05-23 11:26:48.272939927 +0200
@@ -700,8 +700,8 @@
void __init r8a7779_add_standard_devices(void)
{
#ifdef CONFIG_CACHE_L2X0
- /* Early BRESP enable, Shared attribute override enable, 64K*16way */
- l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
+ /* Shared attribute override enable, 64K*16way */
+ l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
#endif
r8a7779_pm_init();
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-socfpga/socfpga.c linux-3.15-rc6/arch/arm/mach-socfpga/socfpga.c
--- linux-3.15-rc6.orig/arch/arm/mach-socfpga/socfpga.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-socfpga/socfpga.c 2014-05-23 11:26:48.272939927 +0200
@@ -100,7 +100,7 @@
static void __init socfpga_cyclone5_init(void)
{
- l2x0_of_init(0, ~0UL);
+ l2x0_of_init(0, ~0);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-spear/platsmp.c linux-3.15-rc6/arch/arm/mach-spear/platsmp.c
--- linux-3.15-rc6.orig/arch/arm/mach-spear/platsmp.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-spear/platsmp.c 2014-05-23 11:26:48.272939927 +0200
@@ -20,6 +20,18 @@
#include <mach/spear.h>
#include "generic.h"
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+static void write_pen_release(int val)
+{
+ pen_release = val;
+ smp_wmb();
+ sync_cache_w(&pen_release);
+}
+
static DEFINE_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
@@ -30,8 +42,7 @@
* let the primary processor know we're out of the
* pen, then head off into the C entry point
*/
- pen_release = -1;
- smp_wmb();
+ write_pen_release(-1);
/*
* Synchronise with the boot thread.
@@ -58,9 +69,7 @@
* Note that "pen_release" is the hardware CPU ID, whereas
* "cpu" is Linux's internal ID.
*/
- pen_release = cpu;
- flush_cache_all();
- outer_flush_all();
+ write_pen_release(cpu);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-spear/spear13xx.c linux-3.15-rc6/arch/arm/mach-spear/spear13xx.c
--- linux-3.15-rc6.orig/arch/arm/mach-spear/spear13xx.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-spear/spear13xx.c 2014-05-23 11:26:48.272939927 +0200
@@ -38,15 +38,15 @@
if (!IS_ENABLED(CONFIG_CACHE_L2X0))
return;
- writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
+ writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL);
/*
* Program following latencies in order to make
* SPEAr1340 work at 600 MHz
*/
- writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
- writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
- l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
+ writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL);
+ writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL);
+ l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff);
}
/*
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-sti/board-dt.c linux-3.15-rc6/arch/arm/mach-sti/board-dt.c
--- linux-3.15-rc6.orig/arch/arm/mach-sti/board-dt.c 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-sti/board-dt.c 2014-05-23 11:26:48.272939927 +0200
@@ -16,15 +16,9 @@
void __init stih41x_l2x0_init(void)
{
- u32 way_size = 0x4;
- u32 aux_ctrl;
- /* may be this can be encoded in macros like BIT*() */
- aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
- (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
- (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
- (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
-
- l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK);
+ l2x0_of_init(L2C_AUX_CTRL_SHARED_OVERRIDE |
+ L310_AUX_CTRL_DATA_PREFETCH |
+ L310_AUX_CTRL_INSTR_PREFETCH, 0xc00f0fff);
}
static void __init stih41x_machine_init(void)
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-tegra/pm.h linux-3.15-rc6/arch/arm/mach-tegra/pm.h
--- linux-3.15-rc6.orig/arch/arm/mach-tegra/pm.h 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-tegra/pm.h 2014-05-23 11:26:48.272939927 +0200
@@ -35,8 +35,6 @@
void tegra30_lp1_iram_hook(void);
void tegra30_sleep_core_init(void);
-extern unsigned long l2x0_saved_regs_addr;
-
void tegra_clear_cpu_in_lp2(void);
bool tegra_set_cpu_in_lp2(void);
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-tegra/reset-handler.S linux-3.15-rc6/arch/arm/mach-tegra/reset-handler.S
--- linux-3.15-rc6.orig/arch/arm/mach-tegra/reset-handler.S 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-tegra/reset-handler.S 2014-05-23 11:26:48.276939940 +0200
@@ -19,7 +19,6 @@
#include <asm/cache.h>
#include <asm/asm-offsets.h>
-#include <asm/hardware/cache-l2x0.h>
#include "flowctrl.h"
#include "fuse.h"
@@ -78,8 +77,10 @@
str r1, [r0]
#endif
+#ifdef CONFIG_CACHE_L2X0
/* L2 cache resume & re-enable */
- l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr
+ bl l2c310_early_resume
+#endif
end_ca9_scu_l2_resume:
mov32 r9, 0xc0f
cmp r8, r9
@@ -89,12 +90,6 @@
ENDPROC(tegra_resume)
#endif
-#ifdef CONFIG_CACHE_L2X0
- .globl l2x0_saved_regs_addr
-l2x0_saved_regs_addr:
- .long 0
-#endif
-
.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler_start)
diff -Nur linux-3.15-rc6.orig/arch/arm/mach-tegra/sleep.h linux-3.15-rc6/arch/arm/mach-tegra/sleep.h
--- linux-3.15-rc6.orig/arch/arm/mach-tegra/sleep.h 2014-05-21 23:42:02.000000000 +0200
+++ linux-3.15-rc6/arch/arm/mach-tegra/sleep.h 2014-05-23 11:26:48.276939940 +0200
@@ -120,37 +120,6 @@
mov \tmp1, \tmp1, lsr #8
.endm
-/* Macro to resume & re-enable L2 cache */
-#ifndef L2X0_CTRL_EN
-#define L2X0_CTRL_EN 1
-#endif
-
-#ifdef CONFIG_CACHE_L2X0
-.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
- W(adr) \tmp1, \phys_l2x0_saved_regs
- ldr \tmp1, [\tmp1]
- ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE]
- ldr \tmp3, [\tmp2, #L2X0_CTRL]
- tst \tmp3, #L2X0_CTRL_EN
- bne exit_l2_resume
- ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY]
- str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL]
- ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY]
- str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL]
- ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL]
- str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL]
- ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL]
- str \tmp3, [\tmp2, #L2X0_POWER_CTRL]
- ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL]
- str \tmp3, [\tmp2, #L2X0_AUX_CTRL]
- mov \tmp3, #L2X0_CTRL_EN
- str \tmp3, [\tmp2, #L2X0_CTRL]
-exit_l2_resume:
-.endm
-#else /* CONFIG_CACHE_L2X0 */
  1537. -.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs
  1538. -.endm
  1539. -#endif /* CONFIG_CACHE_L2X0 */
  1540. #else
  1541. void tegra_pen_lock(void);
  1542. void tegra_pen_unlock(void);
  1543. diff -Nur linux-3.15-rc6.orig/arch/arm/mach-tegra/tegra.c linux-3.15-rc6/arch/arm/mach-tegra/tegra.c
  1544. --- linux-3.15-rc6.orig/arch/arm/mach-tegra/tegra.c 2014-05-21 23:42:02.000000000 +0200
  1545. +++ linux-3.15-rc6/arch/arm/mach-tegra/tegra.c 2014-05-23 11:26:48.276939940 +0200
  1546. @@ -73,27 +73,7 @@
  1547. static void __init tegra_init_cache(void)
  1548. {
  1549. #ifdef CONFIG_CACHE_L2X0
  1550. - static const struct of_device_id pl310_ids[] __initconst = {
  1551. - { .compatible = "arm,pl310-cache", },
  1552. - {}
  1553. - };
  1554. -
  1555. - struct device_node *np;
  1556. - int ret;
  1557. - void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
  1558. - u32 aux_ctrl, cache_type;
  1559. -
  1560. - np = of_find_matching_node(NULL, pl310_ids);
  1561. - if (!np)
  1562. - return;
  1563. -
  1564. - cache_type = readl(p + L2X0_CACHE_TYPE);
  1565. - aux_ctrl = (cache_type & 0x700) << (17-8);
  1566. - aux_ctrl |= 0x7C400001;
  1567. -
  1568. - ret = l2x0_of_init(aux_ctrl, 0x8200c3fe);
  1569. - if (!ret)
  1570. - l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs);
  1571. + l2x0_of_init(0x3c400001, 0xc20fc3fe);
  1572. #endif
  1573. }
  1574. diff -Nur linux-3.15-rc6.orig/arch/arm/mach-ux500/cache-l2x0.c linux-3.15-rc6/arch/arm/mach-ux500/cache-l2x0.c
  1575. --- linux-3.15-rc6.orig/arch/arm/mach-ux500/cache-l2x0.c 2014-05-21 23:42:02.000000000 +0200
  1576. +++ linux-3.15-rc6/arch/arm/mach-ux500/cache-l2x0.c 2014-05-23 11:26:48.276939940 +0200
  1577. @@ -35,10 +35,16 @@
  1578. return 0;
  1579. }
  1580. -static int __init ux500_l2x0_init(void)
  1581. +static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
  1582. {
  1583. - u32 aux_val = 0x3e000000;
  1584. + /*
  1585. + * We can't write to secure registers as we are in non-secure
  1586. + * mode, until we have some SMI service available.
  1587. + */
  1588. +}
  1589. +static int __init ux500_l2x0_init(void)
  1590. +{
  1591. if (cpu_is_u8500_family() || cpu_is_ux540_family())
  1592. l2x0_base = __io_address(U8500_L2CC_BASE);
  1593. else
  1594. @@ -48,28 +54,12 @@
  1595. /* Unlock before init */
  1596. ux500_l2x0_unlock();
  1597. - /* DBx540's L2 has 128KB way size */
  1598. - if (cpu_is_ux540_family())
  1599. - /* 128KB way size */
  1600. - aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
  1601. - else
  1602. - /* 64KB way size */
  1603. - aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT);
  1604. + outer_cache.write_sec = ux500_l2c310_write_sec;
  1605. - /* 64KB way size, 8 way associativity, force WA */
  1606. if (of_have_populated_dt())
  1607. - l2x0_of_init(aux_val, 0xc0000fff);
  1608. + l2x0_of_init(0, ~0);
  1609. else
  1610. - l2x0_init(l2x0_base, aux_val, 0xc0000fff);
  1611. -
  1612. - /*
  1613. - * We can't disable l2 as we are in non secure mode, currently
  1614. - * this seems be called only during kexec path. So let's
  1615. - * override outer.disable with nasty assignment until we have
  1616. - * some SMI service available.
  1617. - */
  1618. - outer_cache.disable = NULL;
  1619. - outer_cache.set_debug = NULL;
  1620. + l2x0_init(l2x0_base, 0, ~0);
  1621. return 0;
  1622. }
  1623. diff -Nur linux-3.15-rc6.orig/arch/arm/mach-vexpress/ct-ca9x4.c linux-3.15-rc6/arch/arm/mach-vexpress/ct-ca9x4.c
  1624. --- linux-3.15-rc6.orig/arch/arm/mach-vexpress/ct-ca9x4.c 2014-05-21 23:42:02.000000000 +0200
  1625. +++ linux-3.15-rc6/arch/arm/mach-vexpress/ct-ca9x4.c 2014-05-23 11:26:48.276939940 +0200
  1626. @@ -45,6 +45,23 @@
  1627. iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
  1628. }
  1629. +static void __init ca9x4_l2_init(void)
  1630. +{
  1631. +#ifdef CONFIG_CACHE_L2X0
  1632. + void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
  1633. +
  1634. + if (l2x0_base) {
  1635. + /* set RAM latencies to 1 cycle for this core tile. */
  1636. + writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
  1637. + writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
  1638. +
  1639. + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
  1640. + } else {
  1641. + pr_err("L2C: unable to map L2 cache controller\n");
  1642. + }
  1643. +#endif
  1644. +}
  1645. +
  1646. #ifdef CONFIG_HAVE_ARM_TWD
  1647. static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);
  1648. @@ -63,6 +80,7 @@
  1649. gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
  1650. ioremap(A9_MPCORE_GIC_CPU, SZ_256));
  1651. ca9x4_twd_init();
  1652. + ca9x4_l2_init();
  1653. }
  1654. static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
  1655. @@ -141,16 +159,6 @@
  1656. {
  1657. int i;
  1658. -#ifdef CONFIG_CACHE_L2X0
  1659. - void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
  1660. -
  1661. - /* set RAM latencies to 1 cycle for this core tile. */
  1662. - writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
  1663. - writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
  1664. -
  1665. - l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
  1666. -#endif
  1667. -
  1668. for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
  1669. amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
  1670. diff -Nur linux-3.15-rc6.orig/arch/arm/mach-zynq/common.c linux-3.15-rc6/arch/arm/mach-zynq/common.c
  1671. --- linux-3.15-rc6.orig/arch/arm/mach-zynq/common.c 2014-05-21 23:42:02.000000000 +0200
  1672. +++ linux-3.15-rc6/arch/arm/mach-zynq/common.c 2014-05-23 11:26:48.276939940 +0200
  1673. @@ -70,7 +70,7 @@
  1674. /*
  1675. * 64KB way size, 8-way associativity, parity disabled
  1676. */
  1677. - l2x0_of_init(0x02060000, 0xF0F0FFFF);
  1678. + l2x0_of_init(0x02000000, 0xf0ffffff);
  1679. of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
  1680. diff -Nur linux-3.15-rc6.orig/arch/arm/mm/cache-feroceon-l2.c linux-3.15-rc6/arch/arm/mm/cache-feroceon-l2.c
  1681. --- linux-3.15-rc6.orig/arch/arm/mm/cache-feroceon-l2.c 2014-05-21 23:42:02.000000000 +0200
  1682. +++ linux-3.15-rc6/arch/arm/mm/cache-feroceon-l2.c 2014-05-23 11:26:48.280939953 +0200
  1683. @@ -350,7 +350,6 @@
  1684. outer_cache.inv_range = feroceon_l2_inv_range;
  1685. outer_cache.clean_range = feroceon_l2_clean_range;
  1686. outer_cache.flush_range = feroceon_l2_flush_range;
  1687. - outer_cache.inv_all = l2_inv_all;
  1688. enable_l2();
  1689. diff -Nur linux-3.15-rc6.orig/arch/arm/mm/cache-l2x0.c linux-3.15-rc6/arch/arm/mm/cache-l2x0.c
  1690. --- linux-3.15-rc6.orig/arch/arm/mm/cache-l2x0.c 2014-05-21 23:42:02.000000000 +0200
  1691. +++ linux-3.15-rc6/arch/arm/mm/cache-l2x0.c 2014-05-23 11:26:48.280939953 +0200
  1692. @@ -16,18 +16,33 @@
  1693. * along with this program; if not, write to the Free Software
  1694. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  1695. */
  1696. +#include <linux/cpu.h>
  1697. #include <linux/err.h>
  1698. #include <linux/init.h>
  1699. +#include <linux/smp.h>
  1700. #include <linux/spinlock.h>
  1701. #include <linux/io.h>
  1702. #include <linux/of.h>
  1703. #include <linux/of_address.h>
  1704. #include <asm/cacheflush.h>
  1705. +#include <asm/cp15.h>
  1706. +#include <asm/cputype.h>
  1707. #include <asm/hardware/cache-l2x0.h>
  1708. #include "cache-tauros3.h"
  1709. #include "cache-aurora-l2.h"
  1710. +struct l2c_init_data {
  1711. + const char *type;
  1712. + unsigned way_size_0;
  1713. + unsigned num_lock;
  1714. + void (*of_parse)(const struct device_node *, u32 *, u32 *);
  1715. + void (*enable)(void __iomem *, u32, unsigned);
  1716. + void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
  1717. + void (*save)(void __iomem *);
  1718. + struct outer_cache_fns outer_cache;
  1719. +};
  1720. +
  1721. #define CACHE_LINE_SIZE 32
  1722. static void __iomem *l2x0_base;
  1723. @@ -36,96 +51,116 @@
  1724. static u32 l2x0_size;
  1725. static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
  1726. -/* Aurora don't have the cache ID register available, so we have to
  1727. - * pass it though the device tree */
  1728. -static u32 cache_id_part_number_from_dt;
  1729. -
  1730. struct l2x0_regs l2x0_saved_regs;
  1731. -struct l2x0_of_data {
  1732. - void (*setup)(const struct device_node *, u32 *, u32 *);
  1733. - void (*save)(void);
  1734. - struct outer_cache_fns outer_cache;
  1735. -};
  1736. -
  1737. -static bool of_init = false;
  1738. -
  1739. -static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
  1740. +/*
  1741. + * Common code for all cache controllers.
  1742. + */
  1743. +static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
  1744. {
  1745. /* wait for cache operation by line or way to complete */
  1746. while (readl_relaxed(reg) & mask)
  1747. cpu_relax();
  1748. }
  1749. -#ifdef CONFIG_CACHE_PL310
  1750. -static inline void cache_wait(void __iomem *reg, unsigned long mask)
  1751. +/*
  1752. + * By default, we write directly to secure registers. Platforms must
  1753. + * override this if they are running non-secure.
  1754. + */
  1755. +static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
  1756. {
  1757. - /* cache operations by line are atomic on PL310 */
  1758. + if (val == readl_relaxed(base + reg))
  1759. + return;
  1760. + if (outer_cache.write_sec)
  1761. + outer_cache.write_sec(val, reg);
  1762. + else
  1763. + writel_relaxed(val, base + reg);
  1764. }
  1765. -#else
  1766. -#define cache_wait cache_wait_way
  1767. -#endif
  1768. -static inline void cache_sync(void)
  1769. +/*
  1770. + * This should only be called when we have a requirement that the
  1771. + * register be written due to a work-around, as platforms running
  1772. + * in non-secure mode may not be able to access this register.
  1773. + */
  1774. +static inline void l2c_set_debug(void __iomem *base, unsigned long val)
  1775. {
  1776. - void __iomem *base = l2x0_base;
  1777. -
  1778. - writel_relaxed(0, base + sync_reg_offset);
  1779. - cache_wait(base + L2X0_CACHE_SYNC, 1);
  1780. + l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
  1781. }
  1782. -static inline void l2x0_clean_line(unsigned long addr)
  1783. +static void __l2c_op_way(void __iomem *reg)
  1784. {
  1785. - void __iomem *base = l2x0_base;
  1786. - cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  1787. - writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
  1788. + writel_relaxed(l2x0_way_mask, reg);
  1789. + l2c_wait_mask(reg, l2x0_way_mask);
  1790. }
  1791. -static inline void l2x0_inv_line(unsigned long addr)
  1792. +static inline void l2c_unlock(void __iomem *base, unsigned num)
  1793. {
  1794. - void __iomem *base = l2x0_base;
  1795. - cache_wait(base + L2X0_INV_LINE_PA, 1);
  1796. - writel_relaxed(addr, base + L2X0_INV_LINE_PA);
  1797. + unsigned i;
  1798. +
  1799. + for (i = 0; i < num; i++) {
  1800. + writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
  1801. + i * L2X0_LOCKDOWN_STRIDE);
  1802. + writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
  1803. + i * L2X0_LOCKDOWN_STRIDE);
  1804. + }
  1805. }
  1806. -#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
  1807. -static inline void debug_writel(unsigned long val)
  1808. +/*
  1809. + * Enable the L2 cache controller. This function must only be
  1810. + * called when the cache controller is known to be disabled.
  1811. + */
  1812. +static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
  1813. {
  1814. - if (outer_cache.set_debug)
  1815. - outer_cache.set_debug(val);
  1816. + unsigned long flags;
  1817. +
  1818. + l2c_write_sec(aux, base, L2X0_AUX_CTRL);
  1819. +
  1820. + l2c_unlock(base, num_lock);
  1821. +
  1822. + local_irq_save(flags);
  1823. + __l2c_op_way(base + L2X0_INV_WAY);
  1824. + writel_relaxed(0, base + sync_reg_offset);
  1825. + l2c_wait_mask(base + sync_reg_offset, 1);
  1826. + local_irq_restore(flags);
  1827. +
  1828. + l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
  1829. }
  1830. -static void pl310_set_debug(unsigned long val)
  1831. +static void l2c_disable(void)
  1832. {
  1833. - writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
  1834. + void __iomem *base = l2x0_base;
  1835. +
  1836. + outer_cache.flush_all();
  1837. + l2c_write_sec(0, base, L2X0_CTRL);
  1838. + dsb(st);
  1839. }
  1840. -#else
  1841. -/* Optimised out for non-errata case */
  1842. -static inline void debug_writel(unsigned long val)
  1843. +
  1844. +#ifdef CONFIG_CACHE_PL310
  1845. +static inline void cache_wait(void __iomem *reg, unsigned long mask)
  1846. {
  1847. + /* cache operations by line are atomic on PL310 */
  1848. }
  1849. -
  1850. -#define pl310_set_debug NULL
  1851. +#else
  1852. +#define cache_wait l2c_wait_mask
  1853. #endif
  1854. -#ifdef CONFIG_PL310_ERRATA_588369
  1855. -static inline void l2x0_flush_line(unsigned long addr)
  1856. +static inline void cache_sync(void)
  1857. {
  1858. void __iomem *base = l2x0_base;
  1859. - /* Clean by PA followed by Invalidate by PA */
  1860. - cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  1861. - writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
  1862. - cache_wait(base + L2X0_INV_LINE_PA, 1);
  1863. - writel_relaxed(addr, base + L2X0_INV_LINE_PA);
  1864. + writel_relaxed(0, base + sync_reg_offset);
  1865. + cache_wait(base + L2X0_CACHE_SYNC, 1);
  1866. }
  1867. -#else
  1868. -static inline void l2x0_flush_line(unsigned long addr)
  1869. +#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
  1870. +static inline void debug_writel(unsigned long val)
  1871. +{
  1872. + l2c_set_debug(l2x0_base, val);
  1873. +}
  1874. +#else
  1875. +/* Optimised out for non-errata case */
  1876. +static inline void debug_writel(unsigned long val)
  1877. {
  1878. - void __iomem *base = l2x0_base;
  1879. - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
  1880. - writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
  1881. }
  1882. #endif
  1883. @@ -141,8 +176,7 @@
  1884. static void __l2x0_flush_all(void)
  1885. {
  1886. debug_writel(0x03);
  1887. - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
  1888. - cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
  1889. + __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
  1890. cache_sync();
  1891. debug_writel(0x00);
  1892. }
  1893. @@ -157,274 +191,882 @@
  1894. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1895. }
  1896. -static void l2x0_clean_all(void)
  1897. +static void l2x0_disable(void)
  1898. {
  1899. unsigned long flags;
  1900. - /* clean all ways */
  1901. raw_spin_lock_irqsave(&l2x0_lock, flags);
  1902. - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
  1903. - cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
  1904. - cache_sync();
  1905. + __l2x0_flush_all();
  1906. + l2c_write_sec(0, l2x0_base, L2X0_CTRL);
  1907. + dsb(st);
  1908. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1909. }
  1910. -static void l2x0_inv_all(void)
  1911. +static void l2c_save(void __iomem *base)
  1912. {
  1913. - unsigned long flags;
  1914. + l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  1915. +}
  1916. - /* invalidate all ways */
  1917. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  1918. - /* Invalidating when L2 is enabled is a nono */
  1919. - BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
  1920. - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
  1921. - cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
  1922. - cache_sync();
  1923. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  1924. +/*
  1925. + * L2C-210 specific code.
  1926. + *
  1927. + * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
  1928. + * ensure that no background operation is running. The way operations
  1929. + * are all background tasks.
  1930. + *
  1931. + * While a background operation is in progress, any new operation is
  1932. + * ignored (unspecified whether this causes an error.) Thankfully, not
  1933. + * used on SMP.
  1934. + *
  1935. + * Never has a different sync register other than L2X0_CACHE_SYNC, but
  1936. + * we use sync_reg_offset here so we can share some of this with L2C-310.
  1937. + */
  1938. +static void __l2c210_cache_sync(void __iomem *base)
  1939. +{
  1940. + writel_relaxed(0, base + sync_reg_offset);
  1941. }
  1942. -static void l2x0_inv_range(unsigned long start, unsigned long end)
  1943. +static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
  1944. + unsigned long end)
  1945. +{
  1946. + while (start < end) {
  1947. + writel_relaxed(start, reg);
  1948. + start += CACHE_LINE_SIZE;
  1949. + }
  1950. +}
  1951. +
  1952. +static void l2c210_inv_range(unsigned long start, unsigned long end)
  1953. {
  1954. void __iomem *base = l2x0_base;
  1955. - unsigned long flags;
  1956. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  1957. if (start & (CACHE_LINE_SIZE - 1)) {
  1958. start &= ~(CACHE_LINE_SIZE - 1);
  1959. - debug_writel(0x03);
  1960. - l2x0_flush_line(start);
  1961. - debug_writel(0x00);
  1962. + writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
  1963. start += CACHE_LINE_SIZE;
  1964. }
  1965. if (end & (CACHE_LINE_SIZE - 1)) {
  1966. end &= ~(CACHE_LINE_SIZE - 1);
  1967. - debug_writel(0x03);
  1968. - l2x0_flush_line(end);
  1969. - debug_writel(0x00);
  1970. + writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
  1971. }
  1972. + __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
  1973. + __l2c210_cache_sync(base);
  1974. +}
  1975. +
  1976. +static void l2c210_clean_range(unsigned long start, unsigned long end)
  1977. +{
  1978. + void __iomem *base = l2x0_base;
  1979. +
  1980. + start &= ~(CACHE_LINE_SIZE - 1);
  1981. + __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
  1982. + __l2c210_cache_sync(base);
  1983. +}
  1984. +
  1985. +static void l2c210_flush_range(unsigned long start, unsigned long end)
  1986. +{
  1987. + void __iomem *base = l2x0_base;
  1988. +
  1989. + start &= ~(CACHE_LINE_SIZE - 1);
  1990. + __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
  1991. + __l2c210_cache_sync(base);
  1992. +}
  1993. +
  1994. +static void l2c210_flush_all(void)
  1995. +{
  1996. + void __iomem *base = l2x0_base;
  1997. +
  1998. + BUG_ON(!irqs_disabled());
  1999. +
  2000. + __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  2001. + __l2c210_cache_sync(base);
  2002. +}
  2003. +
  2004. +static void l2c210_sync(void)
  2005. +{
  2006. + __l2c210_cache_sync(l2x0_base);
  2007. +}
  2008. +
  2009. +static void l2c210_resume(void)
  2010. +{
  2011. + void __iomem *base = l2x0_base;
  2012. +
  2013. + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
  2014. + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
  2015. +}
  2016. +
  2017. +static const struct l2c_init_data l2c210_data __initconst = {
  2018. + .type = "L2C-210",
  2019. + .way_size_0 = SZ_8K,
  2020. + .num_lock = 1,
  2021. + .enable = l2c_enable,
  2022. + .save = l2c_save,
  2023. + .outer_cache = {
  2024. + .inv_range = l2c210_inv_range,
  2025. + .clean_range = l2c210_clean_range,
  2026. + .flush_range = l2c210_flush_range,
  2027. + .flush_all = l2c210_flush_all,
  2028. + .disable = l2c_disable,
  2029. + .sync = l2c210_sync,
  2030. + .resume = l2c210_resume,
  2031. + },
  2032. +};
  2033. +
  2034. +/*
  2035. + * L2C-220 specific code.
  2036. + *
  2037. + * All operations are background operations: they have to be waited for.
  2038. + * Conflicting requests generate a slave error (which will cause an
  2039. + * imprecise abort.) Never uses sync_reg_offset, so we hard-code the
  2040. + * sync register here.
  2041. + *
  2042. + * However, we can re-use the l2c210_resume call.
  2043. + */
  2044. +static inline void __l2c220_cache_sync(void __iomem *base)
  2045. +{
  2046. + writel_relaxed(0, base + L2X0_CACHE_SYNC);
  2047. + l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
  2048. +}
  2049. +
  2050. +static void l2c220_op_way(void __iomem *base, unsigned reg)
  2051. +{
  2052. + unsigned long flags;
  2053. +
  2054. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2055. + __l2c_op_way(base + reg);
  2056. + __l2c220_cache_sync(base);
  2057. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2058. +}
  2059. +
  2060. +static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
  2061. + unsigned long end, unsigned long flags)
  2062. +{
  2063. + raw_spinlock_t *lock = &l2x0_lock;
  2064. +
  2065. while (start < end) {
  2066. unsigned long blk_end = start + min(end - start, 4096UL);
  2067. while (start < blk_end) {
  2068. - l2x0_inv_line(start);
  2069. + l2c_wait_mask(reg, 1);
  2070. + writel_relaxed(start, reg);
  2071. start += CACHE_LINE_SIZE;
  2072. }
  2073. if (blk_end < end) {
  2074. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2075. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2076. + raw_spin_unlock_irqrestore(lock, flags);
  2077. + raw_spin_lock_irqsave(lock, flags);
  2078. }
  2079. }
  2080. - cache_wait(base + L2X0_INV_LINE_PA, 1);
  2081. - cache_sync();
  2082. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2083. +
  2084. + return flags;
  2085. }
  2086. -static void l2x0_clean_range(unsigned long start, unsigned long end)
  2087. +static void l2c220_inv_range(unsigned long start, unsigned long end)
  2088. {
  2089. void __iomem *base = l2x0_base;
  2090. unsigned long flags;
  2091. - if ((end - start) >= l2x0_size) {
  2092. - l2x0_clean_all();
  2093. - return;
  2094. - }
  2095. -
  2096. raw_spin_lock_irqsave(&l2x0_lock, flags);
  2097. - start &= ~(CACHE_LINE_SIZE - 1);
  2098. - while (start < end) {
  2099. - unsigned long blk_end = start + min(end - start, 4096UL);
  2100. -
  2101. - while (start < blk_end) {
  2102. - l2x0_clean_line(start);
  2103. + if ((start | end) & (CACHE_LINE_SIZE - 1)) {
  2104. + if (start & (CACHE_LINE_SIZE - 1)) {
  2105. + start &= ~(CACHE_LINE_SIZE - 1);
  2106. + writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
  2107. start += CACHE_LINE_SIZE;
  2108. }
  2109. - if (blk_end < end) {
  2110. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2111. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2112. + if (end & (CACHE_LINE_SIZE - 1)) {
  2113. + end &= ~(CACHE_LINE_SIZE - 1);
  2114. + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2115. + writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
  2116. }
  2117. }
  2118. - cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
  2119. - cache_sync();
  2120. +
  2121. + flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
  2122. + start, end, flags);
  2123. + l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
  2124. + __l2c220_cache_sync(base);
  2125. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2126. }
  2127. -static void l2x0_flush_range(unsigned long start, unsigned long end)
  2128. +static void l2c220_clean_range(unsigned long start, unsigned long end)
  2129. {
  2130. void __iomem *base = l2x0_base;
  2131. unsigned long flags;
  2132. + start &= ~(CACHE_LINE_SIZE - 1);
  2133. if ((end - start) >= l2x0_size) {
  2134. - l2x0_flush_all();
  2135. + l2c220_op_way(base, L2X0_CLEAN_WAY);
  2136. return;
  2137. }
  2138. raw_spin_lock_irqsave(&l2x0_lock, flags);
  2139. + flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
  2140. + start, end, flags);
  2141. + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2142. + __l2c220_cache_sync(base);
  2143. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2144. +}
  2145. +
  2146. +static void l2c220_flush_range(unsigned long start, unsigned long end)
  2147. +{
  2148. + void __iomem *base = l2x0_base;
  2149. + unsigned long flags;
  2150. +
  2151. start &= ~(CACHE_LINE_SIZE - 1);
  2152. + if ((end - start) >= l2x0_size) {
  2153. + l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
  2154. + return;
  2155. + }
  2156. +
  2157. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2158. + flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
  2159. + start, end, flags);
  2160. + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2161. + __l2c220_cache_sync(base);
  2162. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2163. +}
  2164. +
  2165. +static void l2c220_flush_all(void)
  2166. +{
  2167. + l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
  2168. +}
  2169. +
  2170. +static void l2c220_sync(void)
  2171. +{
  2172. + unsigned long flags;
  2173. +
  2174. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2175. + __l2c220_cache_sync(l2x0_base);
  2176. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2177. +}
  2178. +
  2179. +static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
  2180. +{
  2181. + /*
  2182. + * Always enable non-secure access to the lockdown registers -
  2183. + * we write to them as part of the L2C enable sequence so they
  2184. + * need to be accessible.
  2185. + */
  2186. + aux |= L220_AUX_CTRL_NS_LOCKDOWN;
  2187. +
  2188. + l2c_enable(base, aux, num_lock);
  2189. +}
  2190. +
  2191. +static const struct l2c_init_data l2c220_data = {
  2192. + .type = "L2C-220",
  2193. + .way_size_0 = SZ_8K,
  2194. + .num_lock = 1,
  2195. + .enable = l2c220_enable,
  2196. + .save = l2c_save,
  2197. + .outer_cache = {
  2198. + .inv_range = l2c220_inv_range,
  2199. + .clean_range = l2c220_clean_range,
  2200. + .flush_range = l2c220_flush_range,
  2201. + .flush_all = l2c220_flush_all,
  2202. + .disable = l2c_disable,
  2203. + .sync = l2c220_sync,
  2204. + .resume = l2c210_resume,
  2205. + },
  2206. +};
  2207. +
  2208. +/*
  2209. + * L2C-310 specific code.
  2210. + *
  2211. + * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
  2212. + * and the way operations are all background tasks. However, issuing an
  2213. + * operation while a background operation is in progress results in a
  2214. + * SLVERR response. We can reuse:
  2215. + *
  2216. + * __l2c210_cache_sync (using sync_reg_offset)
  2217. + * l2c210_sync
  2218. + * l2c210_inv_range (if 588369 is not applicable)
  2219. + * l2c210_clean_range
  2220. + * l2c210_flush_range (if 588369 is not applicable)
  2221. + * l2c210_flush_all (if 727915 is not applicable)
  2222. + *
  2223. + * Errata:
  2224. + * 588369: PL310 R0P0->R1P0, fixed R2P0.
  2225. + * Affects: all clean+invalidate operations
  2226. + * clean and invalidate skips the invalidate step, so we need to issue
  2227. + * separate operations. We also require the above debug workaround
  2228. + * enclosing this code fragment on affected parts. On unaffected parts,
  2229. + * we must not use this workaround without the debug register writes
  2230. + * to avoid exposing a problem similar to 727915.
  2231. + *
  2232. + * 727915: PL310 R2P0->R3P0, fixed R3P1.
  2233. + * Affects: clean+invalidate by way
  2234. + * clean and invalidate by way runs in the background, and a store can
  2235. + * hit the line between the clean operation and invalidate operation,
  2236. + * resulting in the store being lost.
  2237. + *
  2238. + * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
  2239. + * Affects: 8x64-bit (double fill) line fetches
  2240. + * double fill line fetches can fail to cause dirty data to be evicted
  2241. + * from the cache before the new data overwrites the second line.
  2242. + *
  2243. + * 753970: PL310 R3P0, fixed R3P1.
  2244. + * Affects: sync
  2245. + * prevents merging writes after the sync operation, until another L2C
  2246. + * operation is performed (or a number of other conditions.)
  2247. + *
  2248. + * 769419: PL310 R0P0->R3P1, fixed R3P2.
  2249. + * Affects: store buffer
  2250. + * store buffer is not automatically drained.
  2251. + */
  2252. +static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
  2253. +{
  2254. + void __iomem *base = l2x0_base;
  2255. +
  2256. + if ((start | end) & (CACHE_LINE_SIZE - 1)) {
  2257. + unsigned long flags;
  2258. +
  2259. + /* Erratum 588369 for both clean+invalidate operations */
  2260. + raw_spin_lock_irqsave(&l2x0_lock, flags);
  2261. + l2c_set_debug(base, 0x03);
  2262. +
  2263. + if (start & (CACHE_LINE_SIZE - 1)) {
  2264. + start &= ~(CACHE_LINE_SIZE - 1);
  2265. + writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
  2266. + writel_relaxed(start, base + L2X0_INV_LINE_PA);
  2267. + start += CACHE_LINE_SIZE;
  2268. + }
  2269. +
  2270. + if (end & (CACHE_LINE_SIZE - 1)) {
  2271. + end &= ~(CACHE_LINE_SIZE - 1);
  2272. + writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
  2273. + writel_relaxed(end, base + L2X0_INV_LINE_PA);
  2274. + }
  2275. +
  2276. + l2c_set_debug(base, 0x00);
  2277. + raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2278. + }
  2279. +
  2280. + __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
  2281. + __l2c210_cache_sync(base);
  2282. +}
  2283. +
  2284. +static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
  2285. +{
  2286. + raw_spinlock_t *lock = &l2x0_lock;
  2287. + unsigned long flags;
  2288. + void __iomem *base = l2x0_base;
  2289. +
  2290. + raw_spin_lock_irqsave(lock, flags);
  2291. while (start < end) {
  2292. unsigned long blk_end = start + min(end - start, 4096UL);
  2293. - debug_writel(0x03);
  2294. + l2c_set_debug(base, 0x03);
  2295. while (start < blk_end) {
  2296. - l2x0_flush_line(start);
  2297. + writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
  2298. + writel_relaxed(start, base + L2X0_INV_LINE_PA);
  2299. start += CACHE_LINE_SIZE;
  2300. }
  2301. - debug_writel(0x00);
  2302. + l2c_set_debug(base, 0x00);
  2303. if (blk_end < end) {
  2304. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2305. - raw_spin_lock_irqsave(&l2x0_lock, flags);
  2306. + raw_spin_unlock_irqrestore(lock, flags);
  2307. + raw_spin_lock_irqsave(lock, flags);
  2308. }
  2309. }
  2310. - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
  2311. - cache_sync();
  2312. - raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2313. + raw_spin_unlock_irqrestore(lock, flags);
  2314. + __l2c210_cache_sync(base);
  2315. }
  2316. -static void l2x0_disable(void)
  2317. +static void l2c310_flush_all_erratum(void)
  2318. {
  2319. + void __iomem *base = l2x0_base;
  2320. unsigned long flags;
  2321. raw_spin_lock_irqsave(&l2x0_lock, flags);
  2322. - __l2x0_flush_all();
  2323. - writel_relaxed(0, l2x0_base + L2X0_CTRL);
  2324. - dsb(st);
  2325. + l2c_set_debug(base, 0x03);
  2326. + __l2c_op_way(base + L2X0_CLEAN_INV_WAY);
  2327. + l2c_set_debug(base, 0x00);
  2328. + __l2c210_cache_sync(base);
  2329. raw_spin_unlock_irqrestore(&l2x0_lock, flags);
  2330. }
  2331. -static void l2x0_unlock(u32 cache_id)
  2332. +static void __init l2c310_save(void __iomem *base)
  2333. {
  2334. - int lockregs;
  2335. - int i;
  2336. + unsigned revision;
  2337. - switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  2338. - case L2X0_CACHE_ID_PART_L310:
  2339. - lockregs = 8;
  2340. - break;
  2341. - case AURORA_CACHE_ID:
  2342. - lockregs = 4;
  2343. + l2c_save(base);
  2344. +
  2345. + l2x0_saved_regs.tag_latency = readl_relaxed(base +
  2346. + L310_TAG_LATENCY_CTRL);
  2347. + l2x0_saved_regs.data_latency = readl_relaxed(base +
  2348. + L310_DATA_LATENCY_CTRL);
  2349. + l2x0_saved_regs.filter_end = readl_relaxed(base +
  2350. + L310_ADDR_FILTER_END);
  2351. + l2x0_saved_regs.filter_start = readl_relaxed(base +
  2352. + L310_ADDR_FILTER_START);
  2353. +
  2354. + revision = readl_relaxed(base + L2X0_CACHE_ID) &
  2355. + L2X0_CACHE_ID_RTL_MASK;
  2356. +
  2357. + /* From r2p0, there is Prefetch offset/control register */
  2358. + if (revision >= L310_CACHE_ID_RTL_R2P0)
  2359. + l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
  2360. + L310_PREFETCH_CTRL);
  2361. +
  2362. + /* From r3p0, there is Power control register */
  2363. + if (revision >= L310_CACHE_ID_RTL_R3P0)
  2364. + l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
  2365. + L310_POWER_CTRL);
  2366. +}
  2367. +
  2368. +static void l2c310_resume(void)
  2369. +{
  2370. + void __iomem *base = l2x0_base;
  2371. +
  2372. + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  2373. + unsigned revision;
  2374. +
  2375. + /* restore pl310 setup */
  2376. + writel_relaxed(l2x0_saved_regs.tag_latency,
  2377. + base + L310_TAG_LATENCY_CTRL);
  2378. + writel_relaxed(l2x0_saved_regs.data_latency,
  2379. + base + L310_DATA_LATENCY_CTRL);
  2380. + writel_relaxed(l2x0_saved_regs.filter_end,
  2381. + base + L310_ADDR_FILTER_END);
  2382. + writel_relaxed(l2x0_saved_regs.filter_start,
  2383. + base + L310_ADDR_FILTER_START);
  2384. +
  2385. + revision = readl_relaxed(base + L2X0_CACHE_ID) &
  2386. + L2X0_CACHE_ID_RTL_MASK;
  2387. +
  2388. + if (revision >= L310_CACHE_ID_RTL_R2P0)
  2389. + l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
  2390. + L310_PREFETCH_CTRL);
  2391. + if (revision >= L310_CACHE_ID_RTL_R3P0)
  2392. + l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
  2393. + L310_POWER_CTRL);
  2394. +
  2395. + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
  2396. +
  2397. + /* Re-enable full-line-of-zeros for Cortex-A9 */
  2398. + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
  2399. + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  2400. + }
  2401. +}
  2402. +
  2403. +static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
  2404. +{
  2405. + switch (act & ~CPU_TASKS_FROZEN) {
  2406. + case CPU_STARTING:
  2407. + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  2408. break;
  2409. - default:
  2410. - /* L210 and unknown types */
  2411. - lockregs = 1;
  2412. + case CPU_DYING:
  2413. + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
  2414. break;
  2415. }
  2416. + return NOTIFY_OK;
  2417. +}
  2418. - for (i = 0; i < lockregs; i++) {
  2419. - writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
  2420. - i * L2X0_LOCKDOWN_STRIDE);
  2421. - writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
  2422. - i * L2X0_LOCKDOWN_STRIDE);
  2423. +static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
  2424. +{
  2425. + unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
  2426. + bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
  2427. +
  2428. + if (rev >= L310_CACHE_ID_RTL_R2P0) {
  2429. + if (cortex_a9) {
  2430. + aux |= L310_AUX_CTRL_EARLY_BRESP;
  2431. + pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
  2432. + } else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
  2433. + pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
  2434. + aux &= ~L310_AUX_CTRL_EARLY_BRESP;
  2435. + }
  2436. + }
  2437. +
  2438. + if (cortex_a9) {
  2439. + u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
  2440. + u32 acr = get_auxcr();
  2441. +
  2442. + pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
  2443. +
  2444. + if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
  2445. + pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
  2446. +
  2447. + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
  2448. + pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
  2449. +
  2450. + if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
  2451. + aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
  2452. + pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
  2453. + }
  2454. + } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
  2455. + pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
  2456. + aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
  2457. + }
  2458. +
  2459. + if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
  2460. + u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
  2461. +
  2462. + pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
  2463. + aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
  2464. + aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
  2465. + 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
  2466. + }
  2467. +
  2468. + /* r3p0 or later has power control register */
  2469. + if (rev >= L310_CACHE_ID_RTL_R3P0) {
  2470. + u32 power_ctrl;
  2471. +
  2472. + l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
  2473. + base, L310_POWER_CTRL);
  2474. + power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
  2475. + pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
  2476. + power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
  2477. + power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
  2478. + }
  2479. +
  2480. + /*
  2481. + * Always enable non-secure access to the lockdown registers -
  2482. + * we write to them as part of the L2C enable sequence so they
  2483. + * need to be accessible.
  2484. + */
  2485. + aux |= L310_AUX_CTRL_NS_LOCKDOWN;
  2486. +
  2487. + l2c_enable(base, aux, num_lock);
  2488. +
  2489. + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
  2490. + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
  2491. + cpu_notifier(l2c310_cpu_enable_flz, 0);
  2492. }
  2493. }
  2494. -void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
  2495. +static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
  2496. + struct outer_cache_fns *fns)
  2497. {
  2498. - u32 aux;
  2499. - u32 cache_id;
  2500. - u32 way_size = 0;
  2501. - int ways;
  2502. - int way_size_shift = L2X0_WAY_SIZE_SHIFT;
  2503. - const char *type;
  2504. + unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
  2505. + const char *errata[8];
  2506. + unsigned n = 0;
  2507. - l2x0_base = base;
  2508. - if (cache_id_part_number_from_dt)
  2509. - cache_id = cache_id_part_number_from_dt;
  2510. - else
  2511. - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
  2512. - aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2513. + if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
  2514. + revision < L310_CACHE_ID_RTL_R2P0 &&
  2515. + /* For bcm compatibility */
  2516. + fns->inv_range == l2c210_inv_range) {
  2517. + fns->inv_range = l2c310_inv_range_erratum;
  2518. + fns->flush_range = l2c310_flush_range_erratum;
  2519. + errata[n++] = "588369";
  2520. + }
  2521. +
  2522. + if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
  2523. + revision >= L310_CACHE_ID_RTL_R2P0 &&
  2524. + revision < L310_CACHE_ID_RTL_R3P1) {
  2525. + fns->flush_all = l2c310_flush_all_erratum;
  2526. + errata[n++] = "727915";
  2527. + }
  2528. +
  2529. + if (revision >= L310_CACHE_ID_RTL_R3P0 &&
  2530. + revision < L310_CACHE_ID_RTL_R3P2) {
  2531. + u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
  2532. + /* I don't think bit23 is required here... but iMX6 does so */
  2533. + if (val & (BIT(30) | BIT(23))) {
  2534. + val &= ~(BIT(30) | BIT(23));
  2535. + l2c_write_sec(val, base, L310_PREFETCH_CTRL);
  2536. + errata[n++] = "752271";
  2537. + }
  2538. + }
  2539. +
  2540. + if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
  2541. + revision == L310_CACHE_ID_RTL_R3P0) {
  2542. + sync_reg_offset = L2X0_DUMMY_REG;
  2543. + errata[n++] = "753970";
  2544. + }
  2545. +
  2546. + if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
  2547. + errata[n++] = "769419";
  2548. +
  2549. + if (n) {
  2550. + unsigned i;
  2551. +
  2552. + pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
  2553. + for (i = 0; i < n; i++)
  2554. + pr_cont(" %s", errata[i]);
  2555. + pr_cont(" enabled\n");
  2556. + }
  2557. +}
  2558. +
  2559. +static void l2c310_disable(void)
  2560. +{
  2561. + /*
  2562. + * If full-line-of-zeros is enabled, we must first disable it in the
  2563. + * Cortex-A9 auxiliary control register before disabling the L2 cache.
  2564. + */
  2565. + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
  2566. + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
  2567. + l2c_disable();
  2568. +}
  2569. +
  2570. +static const struct l2c_init_data l2c310_init_fns __initconst = {
  2571. + .type = "L2C-310",
  2572. + .way_size_0 = SZ_8K,
  2573. + .num_lock = 8,
  2574. + .enable = l2c310_enable,
  2575. + .fixup = l2c310_fixup,
  2576. + .save = l2c310_save,
  2577. + .outer_cache = {
  2578. + .inv_range = l2c210_inv_range,
  2579. + .clean_range = l2c210_clean_range,
  2580. + .flush_range = l2c210_flush_range,
  2581. + .flush_all = l2c210_flush_all,
  2582. + .disable = l2c310_disable,
  2583. + .sync = l2c210_sync,
  2584. + .resume = l2c310_resume,
  2585. + },
  2586. +};
  2587. +
  2588. +static void __init __l2c_init(const struct l2c_init_data *data,
  2589. + u32 aux_val, u32 aux_mask, u32 cache_id)
  2590. +{
  2591. + struct outer_cache_fns fns;
  2592. + unsigned way_size_bits, ways;
  2593. + u32 aux, old_aux;
  2594. +
  2595. + /*
  2596. + * Sanity check the aux values. aux_mask is the bits we preserve
  2597. + * from reading the hardware register, and aux_val is the bits we
  2598. + * set.
  2599. + */
  2600. + if (aux_val & aux_mask)
  2601. + pr_alert("L2C: platform provided aux values permit register corruption.\n");
  2602. +
  2603. + old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2604. aux &= aux_mask;
  2605. aux |= aux_val;
  2606. + if (old_aux != aux)
  2607. + pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
  2608. + old_aux, aux);
  2609. +
  2610. /* Determine the number of ways */
  2611. switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  2612. case L2X0_CACHE_ID_PART_L310:
  2613. + if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
  2614. + pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
  2615. if (aux & (1 << 16))
  2616. ways = 16;
  2617. else
  2618. ways = 8;
  2619. - type = "L310";
  2620. -#ifdef CONFIG_PL310_ERRATA_753970
  2621. - /* Unmapped register. */
  2622. - sync_reg_offset = L2X0_DUMMY_REG;
  2623. -#endif
  2624. - if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
  2625. - outer_cache.set_debug = pl310_set_debug;
  2626. break;
  2627. +
  2628. case L2X0_CACHE_ID_PART_L210:
  2629. + case L2X0_CACHE_ID_PART_L220:
  2630. ways = (aux >> 13) & 0xf;
  2631. - type = "L210";
  2632. break;
  2633. case AURORA_CACHE_ID:
  2634. - sync_reg_offset = AURORA_SYNC_REG;
  2635. ways = (aux >> 13) & 0xf;
  2636. ways = 2 << ((ways + 1) >> 2);
  2637. - way_size_shift = AURORA_WAY_SIZE_SHIFT;
  2638. - type = "Aurora";
  2639. break;
  2640. +
  2641. default:
  2642. /* Assume unknown chips have 8 ways */
  2643. ways = 8;
  2644. - type = "L2x0 series";
  2645. break;
  2646. }
  2647. l2x0_way_mask = (1 << ways) - 1;
  2648. /*
  2649. - * L2 cache Size = Way size * Number of ways
  2650. + * way_size_0 is the size that a way_size value of zero would be
  2651. + * given the calculation: way_size = way_size_0 << way_size_bits.
  2652. + * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
  2653. + * then way_size_0 would be 8k.
  2654. + *
  2655. + * L2 cache size = number of ways * way size.
  2656. + */
  2657. + way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
  2658. + L2C_AUX_CTRL_WAY_SIZE_SHIFT;
  2659. + l2x0_size = ways * (data->way_size_0 << way_size_bits);
  2660. +
  2661. + fns = data->outer_cache;
  2662. + fns.write_sec = outer_cache.write_sec;
  2663. + if (data->fixup)
  2664. + data->fixup(l2x0_base, cache_id, &fns);
  2665. +
  2666. + /*
  2667. + * Check if l2x0 controller is already enabled. If we are booting
  2668. + * in non-secure mode accessing the below registers will fault.
  2669. */
  2670. - way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
  2671. - way_size = 1 << (way_size + way_size_shift);
  2672. + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
  2673. + data->enable(l2x0_base, aux, data->num_lock);
  2674. - l2x0_size = ways * way_size * SZ_1K;
  2675. + outer_cache = fns;
  2676. /*
  2677. - * Check if l2x0 controller is already enabled.
  2678. - * If you are booting from non-secure mode
  2679. - * accessing the below registers will fault.
  2680. + * It is strange to save the register state before initialisation,
  2681. + * but hey, this is what the DT implementations decided to do.
  2682. */
  2683. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  2684. - /* Make sure that I&D is not locked down when starting */
  2685. - l2x0_unlock(cache_id);
  2686. + if (data->save)
  2687. + data->save(l2x0_base);
  2688. +
  2689. + /* Re-read it in case some bits are reserved. */
  2690. + aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2691. +
  2692. + pr_info("%s cache controller enabled, %d ways, %d kB\n",
  2693. + data->type, ways, l2x0_size >> 10);
  2694. + pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
  2695. + data->type, cache_id, aux);
  2696. +}
  2697. - /* l2x0 controller is disabled */
  2698. - writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
  2699. +void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
  2700. +{
  2701. + const struct l2c_init_data *data;
  2702. + u32 cache_id;
  2703. - l2x0_inv_all();
  2704. + l2x0_base = base;
  2705. - /* enable L2X0 */
  2706. - writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
  2707. + cache_id = readl_relaxed(base + L2X0_CACHE_ID);
  2708. +
  2709. + switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
  2710. + default:
  2711. + case L2X0_CACHE_ID_PART_L210:
  2712. + data = &l2c210_data;
  2713. + break;
  2714. +
  2715. + case L2X0_CACHE_ID_PART_L220:
  2716. + data = &l2c220_data;
  2717. + break;
  2718. +
  2719. + case L2X0_CACHE_ID_PART_L310:
  2720. + data = &l2c310_init_fns;
  2721. + break;
  2722. }
  2723. - /* Re-read it in case some bits are reserved. */
  2724. - aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  2725. + __l2c_init(data, aux_val, aux_mask, cache_id);
  2726. +}
  2727. +
  2728. +#ifdef CONFIG_OF
  2729. +static int l2_wt_override;
  2730. +
  2731. +/* Aurora doesn't have the cache ID register available, so we have to
  2732. + * pass it through the device tree */
  2733. +static u32 cache_id_part_number_from_dt;
  2734. +
  2735. +static void __init l2x0_of_parse(const struct device_node *np,
  2736. + u32 *aux_val, u32 *aux_mask)
  2737. +{
  2738. + u32 data[2] = { 0, 0 };
  2739. + u32 tag = 0;
  2740. + u32 dirty = 0;
  2741. + u32 val = 0, mask = 0;
  2742. +
  2743. + of_property_read_u32(np, "arm,tag-latency", &tag);
  2744. + if (tag) {
  2745. + mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
  2746. + val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
  2747. + }
  2748. +
  2749. + of_property_read_u32_array(np, "arm,data-latency",
  2750. + data, ARRAY_SIZE(data));
  2751. + if (data[0] && data[1]) {
  2752. + mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
  2753. + L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
  2754. + val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
  2755. + ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
  2756. + }
  2757. +
  2758. + of_property_read_u32(np, "arm,dirty-latency", &dirty);
  2759. + if (dirty) {
  2760. + mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
  2761. + val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
  2762. + }
  2763. - /* Save the value for resuming. */
  2764. - l2x0_saved_regs.aux_ctrl = aux;
  2765. + *aux_val &= ~mask;
  2766. + *aux_val |= val;
  2767. + *aux_mask &= ~mask;
  2768. +}
  2769. +
  2770. +static const struct l2c_init_data of_l2c210_data __initconst = {
  2771. + .type = "L2C-210",
  2772. + .way_size_0 = SZ_8K,
  2773. + .num_lock = 1,
  2774. + .of_parse = l2x0_of_parse,
  2775. + .enable = l2c_enable,
  2776. + .save = l2c_save,
  2777. + .outer_cache = {
  2778. + .inv_range = l2c210_inv_range,
  2779. + .clean_range = l2c210_clean_range,
  2780. + .flush_range = l2c210_flush_range,
  2781. + .flush_all = l2c210_flush_all,
  2782. + .disable = l2c_disable,
  2783. + .sync = l2c210_sync,
  2784. + .resume = l2c210_resume,
  2785. + },
  2786. +};
  2787. +
  2788. +static const struct l2c_init_data of_l2c220_data __initconst = {
  2789. + .type = "L2C-220",
  2790. + .way_size_0 = SZ_8K,
  2791. + .num_lock = 1,
  2792. + .of_parse = l2x0_of_parse,
  2793. + .enable = l2c220_enable,
  2794. + .save = l2c_save,
  2795. + .outer_cache = {
  2796. + .inv_range = l2c220_inv_range,
  2797. + .clean_range = l2c220_clean_range,
  2798. + .flush_range = l2c220_flush_range,
  2799. + .flush_all = l2c220_flush_all,
  2800. + .disable = l2c_disable,
  2801. + .sync = l2c220_sync,
  2802. + .resume = l2c210_resume,
  2803. + },
  2804. +};
  2805. +
  2806. +static void __init l2c310_of_parse(const struct device_node *np,
  2807. + u32 *aux_val, u32 *aux_mask)
  2808. +{
  2809. + u32 data[3] = { 0, 0, 0 };
  2810. + u32 tag[3] = { 0, 0, 0 };
  2811. + u32 filter[2] = { 0, 0 };
  2812. +
  2813. + of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
  2814. + if (tag[0] && tag[1] && tag[2])
  2815. + writel_relaxed(
  2816. + L310_LATENCY_CTRL_RD(tag[0] - 1) |
  2817. + L310_LATENCY_CTRL_WR(tag[1] - 1) |
  2818. + L310_LATENCY_CTRL_SETUP(tag[2] - 1),
  2819. + l2x0_base + L310_TAG_LATENCY_CTRL);
  2820. +
  2821. + of_property_read_u32_array(np, "arm,data-latency",
  2822. + data, ARRAY_SIZE(data));
  2823. + if (data[0] && data[1] && data[2])
  2824. + writel_relaxed(
  2825. + L310_LATENCY_CTRL_RD(data[0] - 1) |
  2826. + L310_LATENCY_CTRL_WR(data[1] - 1) |
  2827. + L310_LATENCY_CTRL_SETUP(data[2] - 1),
  2828. + l2x0_base + L310_DATA_LATENCY_CTRL);
  2829. - if (!of_init) {
  2830. - outer_cache.inv_range = l2x0_inv_range;
  2831. - outer_cache.clean_range = l2x0_clean_range;
  2832. - outer_cache.flush_range = l2x0_flush_range;
  2833. - outer_cache.sync = l2x0_cache_sync;
  2834. - outer_cache.flush_all = l2x0_flush_all;
  2835. - outer_cache.inv_all = l2x0_inv_all;
  2836. - outer_cache.disable = l2x0_disable;
  2837. - }
  2838. -
  2839. - pr_info("%s cache controller enabled\n", type);
  2840. - pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
  2841. - ways, cache_id, aux, l2x0_size >> 10);
  2842. + of_property_read_u32_array(np, "arm,filter-ranges",
  2843. + filter, ARRAY_SIZE(filter));
  2844. + if (filter[1]) {
  2845. + writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
  2846. + l2x0_base + L310_ADDR_FILTER_END);
  2847. + writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
  2848. + l2x0_base + L310_ADDR_FILTER_START);
  2849. + }
  2850. }
  2851. -#ifdef CONFIG_OF
  2852. -static int l2_wt_override;
  2853. +static const struct l2c_init_data of_l2c310_data __initconst = {
  2854. + .type = "L2C-310",
  2855. + .way_size_0 = SZ_8K,
  2856. + .num_lock = 8,
  2857. + .of_parse = l2c310_of_parse,
  2858. + .enable = l2c310_enable,
  2859. + .fixup = l2c310_fixup,
  2860. + .save = l2c310_save,
  2861. + .outer_cache = {
  2862. + .inv_range = l2c210_inv_range,
  2863. + .clean_range = l2c210_clean_range,
  2864. + .flush_range = l2c210_flush_range,
  2865. + .flush_all = l2c210_flush_all,
  2866. + .disable = l2c310_disable,
  2867. + .sync = l2c210_sync,
  2868. + .resume = l2c310_resume,
  2869. + },
  2870. +};
  2871. /*
  2872. * Note that the end addresses passed to Linux primitives are
  2873. @@ -524,6 +1166,100 @@
  2874. }
  2875. }
  2876. +static void aurora_save(void __iomem *base)
  2877. +{
  2878. + l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
  2879. + l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
  2880. +}
  2881. +
  2882. +static void aurora_resume(void)
  2883. +{
  2884. + void __iomem *base = l2x0_base;
  2885. +
  2886. + if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  2887. + writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
  2888. + writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
  2889. + }
  2890. +}
  2891. +
  2892. +/*
  2893. + * For Aurora cache in no outer mode, enable via the CP15 coprocessor
  2894. + * broadcasting of cache commands to L2.
  2895. + */
  2896. +static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
  2897. + unsigned num_lock)
  2898. +{
  2899. + u32 u;
  2900. +
  2901. + asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
  2902. + u |= AURORA_CTRL_FW; /* Set the FW bit */
  2903. + asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
  2904. +
  2905. + isb();
  2906. +
  2907. + l2c_enable(base, aux, num_lock);
  2908. +}
  2909. +
  2910. +static void __init aurora_fixup(void __iomem *base, u32 cache_id,
  2911. + struct outer_cache_fns *fns)
  2912. +{
  2913. + sync_reg_offset = AURORA_SYNC_REG;
  2914. +}
  2915. +
  2916. +static void __init aurora_of_parse(const struct device_node *np,
  2917. + u32 *aux_val, u32 *aux_mask)
  2918. +{
  2919. + u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
  2920. + u32 mask = AURORA_ACR_REPLACEMENT_MASK;
  2921. +
  2922. + of_property_read_u32(np, "cache-id-part",
  2923. + &cache_id_part_number_from_dt);
  2924. +
  2925. + /* Determine and save the write policy */
  2926. + l2_wt_override = of_property_read_bool(np, "wt-override");
  2927. +
  2928. + if (l2_wt_override) {
  2929. + val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
  2930. + mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
  2931. + }
  2932. +
  2933. + *aux_val &= ~mask;
  2934. + *aux_val |= val;
  2935. + *aux_mask &= ~mask;
  2936. +}
  2937. +
  2938. +static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
  2939. + .type = "Aurora",
  2940. + .way_size_0 = SZ_4K,
  2941. + .num_lock = 4,
  2942. + .of_parse = aurora_of_parse,
  2943. + .enable = l2c_enable,
  2944. + .fixup = aurora_fixup,
  2945. + .save = aurora_save,
  2946. + .outer_cache = {
  2947. + .inv_range = aurora_inv_range,
  2948. + .clean_range = aurora_clean_range,
  2949. + .flush_range = aurora_flush_range,
  2950. + .flush_all = l2x0_flush_all,
  2951. + .disable = l2x0_disable,
  2952. + .sync = l2x0_cache_sync,
  2953. + .resume = aurora_resume,
  2954. + },
  2955. +};
  2956. +
  2957. +static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
  2958. + .type = "Aurora",
  2959. + .way_size_0 = SZ_4K,
  2960. + .num_lock = 4,
  2961. + .of_parse = aurora_of_parse,
  2962. + .enable = aurora_enable_no_outer,
  2963. + .fixup = aurora_fixup,
  2964. + .save = aurora_save,
  2965. + .outer_cache = {
  2966. + .resume = aurora_resume,
  2967. + },
  2968. +};
  2969. +
  2970. /*
  2971. * For certain Broadcom SoCs, depending on the address range, different offsets
  2972. * need to be added to the address before passing it to L2 for
  2973. @@ -588,16 +1324,16 @@
  2974. /* normal case, no cross section between start and end */
  2975. if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
  2976. - l2x0_inv_range(new_start, new_end);
  2977. + l2c210_inv_range(new_start, new_end);
  2978. return;
  2979. }
  2980. /* They cross sections, so it can only be a cross from section
  2981. * 2 to section 3
  2982. */
  2983. - l2x0_inv_range(new_start,
  2984. + l2c210_inv_range(new_start,
  2985. bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
  2986. - l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  2987. + l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  2988. new_end);
  2989. }
  2990. @@ -610,26 +1346,21 @@
  2991. if (unlikely(end <= start))
  2992. return;
  2993. - if ((end - start) >= l2x0_size) {
  2994. - l2x0_clean_all();
  2995. - return;
  2996. - }
  2997. -
  2998. new_start = bcm_l2_phys_addr(start);
  2999. new_end = bcm_l2_phys_addr(end);
  3000. /* normal case, no cross section between start and end */
  3001. if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
  3002. - l2x0_clean_range(new_start, new_end);
  3003. + l2c210_clean_range(new_start, new_end);
  3004. return;
  3005. }
  3006. /* They cross sections, so it can only be a cross from section
  3007. * 2 to section 3
  3008. */
  3009. - l2x0_clean_range(new_start,
  3010. + l2c210_clean_range(new_start,
  3011. bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
  3012. - l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3013. + l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3014. new_end);
  3015. }
  3016. @@ -643,7 +1374,7 @@
  3017. return;
  3018. if ((end - start) >= l2x0_size) {
  3019. - l2x0_flush_all();
  3020. + outer_cache.flush_all();
  3021. return;
  3022. }
  3023. @@ -652,283 +1383,67 @@
  3024. /* normal case, no cross section between start and end */
  3025. if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
  3026. - l2x0_flush_range(new_start, new_end);
  3027. + l2c210_flush_range(new_start, new_end);
  3028. return;
  3029. }
  3030. /* They cross sections, so it can only be a cross from section
  3031. * 2 to section 3
  3032. */
  3033. - l2x0_flush_range(new_start,
  3034. + l2c210_flush_range(new_start,
  3035. bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
  3036. - l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3037. + l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
  3038. new_end);
  3039. }
  3040. -static void __init l2x0_of_setup(const struct device_node *np,
  3041. - u32 *aux_val, u32 *aux_mask)
  3042. -{
  3043. - u32 data[2] = { 0, 0 };
  3044. - u32 tag = 0;
  3045. - u32 dirty = 0;
  3046. - u32 val = 0, mask = 0;
  3047. -
  3048. - of_property_read_u32(np, "arm,tag-latency", &tag);
  3049. - if (tag) {
  3050. - mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
  3051. - val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
  3052. - }
  3053. -
  3054. - of_property_read_u32_array(np, "arm,data-latency",
  3055. - data, ARRAY_SIZE(data));
  3056. - if (data[0] && data[1]) {
  3057. - mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
  3058. - L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
  3059. - val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
  3060. - ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
  3061. - }
  3062. -
  3063. - of_property_read_u32(np, "arm,dirty-latency", &dirty);
  3064. - if (dirty) {
  3065. - mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
  3066. - val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
  3067. - }
  3068. -
  3069. - *aux_val &= ~mask;
  3070. - *aux_val |= val;
  3071. - *aux_mask &= ~mask;
  3072. -}
  3073. -
  3074. -static void __init pl310_of_setup(const struct device_node *np,
  3075. - u32 *aux_val, u32 *aux_mask)
  3076. -{
  3077. - u32 data[3] = { 0, 0, 0 };
  3078. - u32 tag[3] = { 0, 0, 0 };
  3079. - u32 filter[2] = { 0, 0 };
  3080. -
  3081. - of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
  3082. - if (tag[0] && tag[1] && tag[2])
  3083. - writel_relaxed(
  3084. - ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
  3085. - ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
  3086. - ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
  3087. - l2x0_base + L2X0_TAG_LATENCY_CTRL);
  3088. -
  3089. - of_property_read_u32_array(np, "arm,data-latency",
  3090. - data, ARRAY_SIZE(data));
  3091. - if (data[0] && data[1] && data[2])
  3092. - writel_relaxed(
  3093. - ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
  3094. - ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
  3095. - ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
  3096. - l2x0_base + L2X0_DATA_LATENCY_CTRL);
  3097. -
  3098. - of_property_read_u32_array(np, "arm,filter-ranges",
  3099. - filter, ARRAY_SIZE(filter));
  3100. - if (filter[1]) {
  3101. - writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
  3102. - l2x0_base + L2X0_ADDR_FILTER_END);
  3103. - writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
  3104. - l2x0_base + L2X0_ADDR_FILTER_START);
  3105. - }
  3106. -}
  3107. -
  3108. -static void __init pl310_save(void)
  3109. -{
  3110. - u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
  3111. - L2X0_CACHE_ID_RTL_MASK;
  3112. -
  3113. - l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
  3114. - L2X0_TAG_LATENCY_CTRL);
  3115. - l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
  3116. - L2X0_DATA_LATENCY_CTRL);
  3117. - l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
  3118. - L2X0_ADDR_FILTER_END);
  3119. - l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
  3120. - L2X0_ADDR_FILTER_START);
  3121. -
  3122. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
  3123. - /*
  3124. - * From r2p0, there is Prefetch offset/control register
  3125. - */
  3126. - l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
  3127. - L2X0_PREFETCH_CTRL);
  3128. - /*
  3129. - * From r3p0, there is Power control register
  3130. - */
  3131. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
  3132. - l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
  3133. - L2X0_POWER_CTRL);
  3134. - }
  3135. -}
  3136. +/* Broadcom L2C-310s are based on ARM's r3p2 or later, and require no fixups */
  3137. +static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
  3138. + .type = "BCM-L2C-310",
  3139. + .way_size_0 = SZ_8K,
  3140. + .num_lock = 8,
  3141. + .of_parse = l2c310_of_parse,
  3142. + .enable = l2c310_enable,
  3143. + .save = l2c310_save,
  3144. + .outer_cache = {
  3145. + .inv_range = bcm_inv_range,
  3146. + .clean_range = bcm_clean_range,
  3147. + .flush_range = bcm_flush_range,
  3148. + .flush_all = l2c210_flush_all,
  3149. + .disable = l2c310_disable,
  3150. + .sync = l2c210_sync,
  3151. + .resume = l2c310_resume,
  3152. + },
  3153. +};
  3154. -static void aurora_save(void)
  3155. +static void __init tauros3_save(void __iomem *base)
  3156. {
  3157. - l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
  3158. - l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  3159. -}
  3160. + l2c_save(base);
  3161. -static void __init tauros3_save(void)
  3162. -{
  3163. l2x0_saved_regs.aux2_ctrl =
  3164. - readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
  3165. + readl_relaxed(base + TAUROS3_AUX2_CTRL);
  3166. l2x0_saved_regs.prefetch_ctrl =
  3167. - readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
  3168. -}
  3169. -
  3170. -static void l2x0_resume(void)
  3171. -{
  3172. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3173. - /* restore aux ctrl and enable l2 */
  3174. - l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
  3175. -
  3176. - writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
  3177. - L2X0_AUX_CTRL);
  3178. -
  3179. - l2x0_inv_all();
  3180. -
  3181. - writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
  3182. - }
  3183. -}
  3184. -
  3185. -static void pl310_resume(void)
  3186. -{
  3187. - u32 l2x0_revision;
  3188. -
  3189. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3190. - /* restore pl310 setup */
  3191. - writel_relaxed(l2x0_saved_regs.tag_latency,
  3192. - l2x0_base + L2X0_TAG_LATENCY_CTRL);
  3193. - writel_relaxed(l2x0_saved_regs.data_latency,
  3194. - l2x0_base + L2X0_DATA_LATENCY_CTRL);
  3195. - writel_relaxed(l2x0_saved_regs.filter_end,
  3196. - l2x0_base + L2X0_ADDR_FILTER_END);
  3197. - writel_relaxed(l2x0_saved_regs.filter_start,
  3198. - l2x0_base + L2X0_ADDR_FILTER_START);
  3199. -
  3200. - l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
  3201. - L2X0_CACHE_ID_RTL_MASK;
  3202. -
  3203. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
  3204. - writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
  3205. - l2x0_base + L2X0_PREFETCH_CTRL);
  3206. - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
  3207. - writel_relaxed(l2x0_saved_regs.pwr_ctrl,
  3208. - l2x0_base + L2X0_POWER_CTRL);
  3209. - }
  3210. - }
  3211. -
  3212. - l2x0_resume();
  3213. -}
  3214. -
  3215. -static void aurora_resume(void)
  3216. -{
  3217. - if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3218. - writel_relaxed(l2x0_saved_regs.aux_ctrl,
  3219. - l2x0_base + L2X0_AUX_CTRL);
  3220. - writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
  3221. - }
  3222. + readl_relaxed(base + L310_PREFETCH_CTRL);
  3223. }
  3224. static void tauros3_resume(void)
  3225. {
  3226. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3227. + void __iomem *base = l2x0_base;
  3228. +
  3229. + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3230. writel_relaxed(l2x0_saved_regs.aux2_ctrl,
  3231. - l2x0_base + TAUROS3_AUX2_CTRL);
  3232. + base + TAUROS3_AUX2_CTRL);
  3233. writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
  3234. - l2x0_base + L2X0_PREFETCH_CTRL);
  3235. - }
  3236. -
  3237. - l2x0_resume();
  3238. -}
  3239. -
  3240. -static void __init aurora_broadcast_l2_commands(void)
  3241. -{
  3242. - __u32 u;
  3243. - /* Enable Broadcasting of cache commands to L2*/
  3244. - __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
  3245. - u |= AURORA_CTRL_FW; /* Set the FW bit */
  3246. - __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
  3247. - isb();
  3248. -}
  3249. -
  3250. -static void __init aurora_of_setup(const struct device_node *np,
  3251. - u32 *aux_val, u32 *aux_mask)
  3252. -{
  3253. - u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
  3254. - u32 mask = AURORA_ACR_REPLACEMENT_MASK;
  3255. + base + L310_PREFETCH_CTRL);
  3256. - of_property_read_u32(np, "cache-id-part",
  3257. - &cache_id_part_number_from_dt);
  3258. -
  3259. - /* Determine and save the write policy */
  3260. - l2_wt_override = of_property_read_bool(np, "wt-override");
  3261. -
  3262. - if (l2_wt_override) {
  3263. - val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
  3264. - mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
  3265. + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
  3266. }
  3267. -
  3268. - *aux_val &= ~mask;
  3269. - *aux_val |= val;
  3270. - *aux_mask &= ~mask;
  3271. }
  3272. -static const struct l2x0_of_data pl310_data = {
  3273. - .setup = pl310_of_setup,
  3274. - .save = pl310_save,
  3275. - .outer_cache = {
  3276. - .resume = pl310_resume,
  3277. - .inv_range = l2x0_inv_range,
  3278. - .clean_range = l2x0_clean_range,
  3279. - .flush_range = l2x0_flush_range,
  3280. - .sync = l2x0_cache_sync,
  3281. - .flush_all = l2x0_flush_all,
  3282. - .inv_all = l2x0_inv_all,
  3283. - .disable = l2x0_disable,
  3284. - },
  3285. -};
  3286. -
  3287. -static const struct l2x0_of_data l2x0_data = {
  3288. - .setup = l2x0_of_setup,
  3289. - .save = NULL,
  3290. - .outer_cache = {
  3291. - .resume = l2x0_resume,
  3292. - .inv_range = l2x0_inv_range,
  3293. - .clean_range = l2x0_clean_range,
  3294. - .flush_range = l2x0_flush_range,
  3295. - .sync = l2x0_cache_sync,
  3296. - .flush_all = l2x0_flush_all,
  3297. - .inv_all = l2x0_inv_all,
  3298. - .disable = l2x0_disable,
  3299. - },
  3300. -};
  3301. -
  3302. -static const struct l2x0_of_data aurora_with_outer_data = {
  3303. - .setup = aurora_of_setup,
  3304. - .save = aurora_save,
  3305. - .outer_cache = {
  3306. - .resume = aurora_resume,
  3307. - .inv_range = aurora_inv_range,
  3308. - .clean_range = aurora_clean_range,
  3309. - .flush_range = aurora_flush_range,
  3310. - .sync = l2x0_cache_sync,
  3311. - .flush_all = l2x0_flush_all,
  3312. - .inv_all = l2x0_inv_all,
  3313. - .disable = l2x0_disable,
  3314. - },
  3315. -};
  3316. -
  3317. -static const struct l2x0_of_data aurora_no_outer_data = {
  3318. - .setup = aurora_of_setup,
  3319. - .save = aurora_save,
  3320. - .outer_cache = {
  3321. - .resume = aurora_resume,
  3322. - },
  3323. -};
  3324. -
  3325. -static const struct l2x0_of_data tauros3_data = {
  3326. - .setup = NULL,
  3327. +static const struct l2c_init_data of_tauros3_data __initconst = {
  3328. + .type = "Tauros3",
  3329. + .way_size_0 = SZ_8K,
  3330. + .num_lock = 8,
  3331. + .enable = l2c_enable,
  3332. .save = tauros3_save,
  3333. /* Tauros3 broadcasts L1 cache operations to L2 */
  3334. .outer_cache = {
  3335. @@ -936,43 +1451,26 @@
  3336. },
  3337. };
  3338. -static const struct l2x0_of_data bcm_l2x0_data = {
  3339. - .setup = pl310_of_setup,
  3340. - .save = pl310_save,
  3341. - .outer_cache = {
  3342. - .resume = pl310_resume,
  3343. - .inv_range = bcm_inv_range,
  3344. - .clean_range = bcm_clean_range,
  3345. - .flush_range = bcm_flush_range,
  3346. - .sync = l2x0_cache_sync,
  3347. - .flush_all = l2x0_flush_all,
  3348. - .inv_all = l2x0_inv_all,
  3349. - .disable = l2x0_disable,
  3350. - },
  3351. -};
  3352. -
  3353. +#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
  3354. static const struct of_device_id l2x0_ids[] __initconst = {
  3355. - { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
  3356. - { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
  3357. - { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
  3358. - { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
  3359. - .data = (void *)&bcm_l2x0_data},
  3360. - { .compatible = "brcm,bcm11351-a2-pl310-cache",
  3361. - .data = (void *)&bcm_l2x0_data},
  3362. - { .compatible = "marvell,aurora-outer-cache",
  3363. - .data = (void *)&aurora_with_outer_data},
  3364. - { .compatible = "marvell,aurora-system-cache",
  3365. - .data = (void *)&aurora_no_outer_data},
  3366. - { .compatible = "marvell,tauros3-cache",
  3367. - .data = (void *)&tauros3_data },
  3368. + L2C_ID("arm,l210-cache", of_l2c210_data),
  3369. + L2C_ID("arm,l220-cache", of_l2c220_data),
  3370. + L2C_ID("arm,pl310-cache", of_l2c310_data),
  3371. + L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
  3372. + L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
  3373. + L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
  3374. + L2C_ID("marvell,tauros3-cache", of_tauros3_data),
  3375. + /* Deprecated IDs */
  3376. + L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
  3377. {}
  3378. };
  3379. int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
  3380. {
  3381. + const struct l2c_init_data *data;
  3382. struct device_node *np;
  3383. - const struct l2x0_of_data *data;
  3384. struct resource res;
  3385. + u32 cache_id, old_aux;
  3386. np = of_find_matching_node(NULL, l2x0_ids);
  3387. if (!np)
  3388. @@ -989,23 +1487,29 @@
  3389. data = of_match_node(l2x0_ids, np)->data;
  3390. - /* L2 configuration can only be changed if the cache is disabled */
  3391. - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
  3392. - if (data->setup)
  3393. - data->setup(np, &aux_val, &aux_mask);
  3394. -
  3395. - /* For aurora cache in no outer mode select the
  3396. - * correct mode using the coprocessor*/
  3397. - if (data == &aurora_no_outer_data)
  3398. - aurora_broadcast_l2_commands();
  3399. + old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
  3400. + if (old_aux != ((old_aux & aux_mask) | aux_val)) {
  3401. + pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
  3402. + old_aux, (old_aux & aux_mask) | aux_val);
  3403. + } else if (aux_mask != ~0U && aux_val != 0) {
  3404. + pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
  3405. }
  3406. - if (data->save)
  3407. - data->save();
  3408. + /* All L2 caches are unified, so this property should be specified */
  3409. + if (!of_property_read_bool(np, "cache-unified"))
  3410. + pr_err("L2C: device tree omits to specify unified cache\n");
  3411. +
  3412. + /* L2 configuration can only be changed if the cache is disabled */
  3413. + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
  3414. + if (data->of_parse)
  3415. + data->of_parse(np, &aux_val, &aux_mask);
  3416. +
  3417. + if (cache_id_part_number_from_dt)
  3418. + cache_id = cache_id_part_number_from_dt;
  3419. + else
  3420. + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
  3421. - of_init = true;
  3422. - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
  3423. - l2x0_init(l2x0_base, aux_val, aux_mask);
  3424. + __l2c_init(data, aux_val, aux_mask, cache_id);
  3425. return 0;
  3426. }
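
For context, a minimal sketch of how platform code typically calls the DT entry point declared as l2x0_of_init(u32 aux_val, u32 aux_mask) above; the machine hook name is hypothetical, and passing 0/~0 requests no auxiliary-control override so the of_parse handlers do all the configuration:

#include <linux/init.h>
#include <asm/hardware/cache-l2x0.h>

/* hypothetical .init_machine callback for a DT platform with an L2C */
static void __init example_dt_init_machine(void)
{
        /* aux_val = 0, aux_mask = ~0: keep the aux control as found/parsed */
        l2x0_of_init(0, ~0UL);
}
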
  3427. diff -Nur linux-3.15-rc6.orig/arch/arm/mm/Kconfig linux-3.15-rc6/arch/arm/mm/Kconfig
  3428. --- linux-3.15-rc6.orig/arch/arm/mm/Kconfig 2014-05-21 23:42:02.000000000 +0200
  3429. +++ linux-3.15-rc6/arch/arm/mm/Kconfig 2014-05-23 11:26:48.280939953 +0200
  3430. @@ -897,6 +897,57 @@
  3431. This option enables optimisations for the PL310 cache
  3432. controller.
  3433. +config PL310_ERRATA_588369
  3434. + bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
  3435. + depends on CACHE_L2X0
  3436. + help
  3437. + The PL310 L2 cache controller implements three types of Clean &
  3438. + Invalidate maintenance operations: by Physical Address
  3439. + (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
  3440. + They are architecturally defined to behave as the execution of a
  3441. + clean operation followed immediately by an invalidate operation,
  3442. + both performed on the same memory location. This functionality
  3443. + is not correctly implemented in PL310 as clean lines are not
  3444. + invalidated as a result of these operations.
  3445. +
  3446. +config PL310_ERRATA_727915
  3447. + bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
  3448. + depends on CACHE_L2X0
  3449. + help
  3450. + PL310 implements the Clean & Invalidate by Way L2 cache maintenance
  3451. + operation (offset 0x7FC). This operation runs in background so that
  3452. + PL310 can handle normal accesses while it is in progress. Under very
  3453. + rare circumstances, due to this erratum, write data can be lost when
  3454. + PL310 handles a cacheable write transaction during a Clean &
  3455. + Invalidate by Way operation.
  3456. +
  3457. +config PL310_ERRATA_753970
  3458. + bool "PL310 errata: cache sync operation may be faulty"
  3459. + depends on CACHE_PL310
  3460. + help
  3461. + This option enables the workaround for the 753970 PL310 (r3p0) erratum.
  3462. +
  3463. + Under some conditions the effect of a cache sync operation on
  3464. + the store buffer still remains when the operation completes.
  3465. + This means that the store buffer is always asked to drain,
  3466. + which prevents it from merging any further writes. The workaround
  3467. + is to replace the normal offset of the cache sync operation (0x730)
  3468. + with another offset targeting an unmapped PL310 register at 0x740.
  3469. + This has the same effect as the cache sync operation: the store
  3470. + buffer is drained and we wait for all buffers to empty.
  3471. +
  3472. +config PL310_ERRATA_769419
  3473. + bool "PL310 errata: no automatic Store Buffer drain"
  3474. + depends on CACHE_L2X0
  3475. + help
  3476. + On revisions of the PL310 prior to r3p2, the Store Buffer does
  3477. + not automatically drain. This can cause normal, non-cacheable
  3478. + writes to be retained when the memory system is idle, leading
  3479. + to suboptimal I/O performance for drivers using coherent DMA.
  3480. + This option adds a write barrier to the cpu_idle loop so that,
  3481. + on systems with an outer cache, the store buffer is drained
  3482. + explicitly.
  3483. +
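
A sketch of the 753970 workaround described above, assuming the usual cache-l2x0.h offsets (0x730 for the cache sync register, 0x740 for the unmapped dummy register); the example names are hypothetical and only illustrate the offset swap:

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

#ifdef CONFIG_PL310_ERRATA_753970
#define EXAMPLE_SYNC_OFFSET     L2X0_DUMMY_REG  /* 0x740: unmapped, sidesteps the erratum */
#else
#define EXAMPLE_SYNC_OFFSET     L2X0_CACHE_SYNC /* 0x730: the normal cache sync register */
#endif

static inline void example_l2_sync(void __iomem *base)
{
        writel_relaxed(0, base + EXAMPLE_SYNC_OFFSET);  /* drains the store buffer either way */
}
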
  3484. config CACHE_TAUROS2
  3485. bool "Enable the Tauros2 L2 cache controller"
  3486. depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
  3487. diff -Nur linux-3.15-rc6.orig/arch/arm/mm/l2c-common.c linux-3.15-rc6/arch/arm/mm/l2c-common.c
  3488. --- linux-3.15-rc6.orig/arch/arm/mm/l2c-common.c 1970-01-01 01:00:00.000000000 +0100
  3489. +++ linux-3.15-rc6/arch/arm/mm/l2c-common.c 2014-05-23 11:26:48.284939966 +0200
  3490. @@ -0,0 +1,20 @@
  3491. +/*
  3492. + * Copyright (C) 2010 ARM Ltd.
  3493. + * Written by Catalin Marinas <catalin.marinas@arm.com>
  3494. + *
  3495. + * This program is free software; you can redistribute it and/or modify
  3496. + * it under the terms of the GNU General Public License version 2 as
  3497. + * published by the Free Software Foundation.
  3498. + */
  3499. +#include <linux/bug.h>
  3500. +#include <linux/smp.h>
  3501. +#include <asm/outercache.h>
  3502. +
  3503. +void outer_disable(void)
  3504. +{
  3505. + WARN_ON(!irqs_disabled());
  3506. + WARN_ON(num_online_cpus() > 1);
  3507. +
  3508. + if (outer_cache.disable)
  3509. + outer_cache.disable();
  3510. +}
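
To illustrate the calling convention the two WARN_ONs enforce, a hypothetical caller (not taken from the patch): outer_disable() is intended for late shutdown/kexec paths, running on the last online CPU with interrupts masked, after the inner and outer caches have been cleaned:

#include <linux/irqflags.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>

static void example_shutdown_caches(void)
{
        local_irq_disable();    /* satisfies WARN_ON(!irqs_disabled()) */
        flush_cache_all();      /* clean/invalidate the inner caches first */
        outer_flush_all();      /* then clean+invalidate the outer cache */
        outer_disable();        /* finally switch the L2 controller off */
}
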
  3511. diff -Nur linux-3.15-rc6.orig/arch/arm/mm/l2c-l2x0-resume.S linux-3.15-rc6/arch/arm/mm/l2c-l2x0-resume.S
  3512. --- linux-3.15-rc6.orig/arch/arm/mm/l2c-l2x0-resume.S 1970-01-01 01:00:00.000000000 +0100
  3513. +++ linux-3.15-rc6/arch/arm/mm/l2c-l2x0-resume.S 2014-05-23 11:26:48.284939966 +0200
  3514. @@ -0,0 +1,58 @@
  3515. +/*
  3516. + * L2C-310 early resume code. This can be used by platforms to restore
  3517. + * the settings of their L2 cache controller before restoring the
  3518. + * processor state.
  3519. + *
  3520. + * This code can only be used if you are running in the secure world.
  3521. + */
  3522. +#include <linux/linkage.h>
  3523. +#include <asm/hardware/cache-l2x0.h>
  3524. +
  3525. + .text
  3526. +
  3527. +ENTRY(l2c310_early_resume)
  3528. + adr r0, 1f
  3529. + ldr r2, [r0]
  3530. + add r0, r2, r0
  3531. +
  3532. + ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8}
  3533. + @ r1 = phys address of L2C-310 controller
  3534. + @ r2 = aux_ctrl
  3535. + @ r3 = tag_latency
  3536. + @ r4 = data_latency
  3537. + @ r5 = filter_start
  3538. + @ r6 = filter_end
  3539. + @ r7 = prefetch_ctrl
  3540. + @ r8 = pwr_ctrl
  3541. +
  3542. + @ Check that the address has been initialised
  3543. + teq r1, #0
  3544. + moveq pc, lr
  3545. +
  3546. + @ The prefetch and power control registers are revision dependent
  3547. + @ and can be written whether or not the L2 cache is enabled
  3548. + ldr r0, [r1, #L2X0_CACHE_ID]
  3549. + and r0, r0, #L2X0_CACHE_ID_RTL_MASK
  3550. + cmp r0, #L310_CACHE_ID_RTL_R2P0
  3551. + strcs r7, [r1, #L310_PREFETCH_CTRL]
  3552. + cmp r0, #L310_CACHE_ID_RTL_R3P0
  3553. + strcs r8, [r1, #L310_POWER_CTRL]
  3554. +
  3555. + @ Don't setup the L2 cache if it is already enabled
  3556. + ldr r0, [r1, #L2X0_CTRL]
  3557. + tst r0, #L2X0_CTRL_EN
  3558. + movne pc, lr
  3559. +
  3560. + str r3, [r1, #L310_TAG_LATENCY_CTRL]
  3561. + str r4, [r1, #L310_DATA_LATENCY_CTRL]
  3562. + str r6, [r1, #L310_ADDR_FILTER_END]
  3563. + str r5, [r1, #L310_ADDR_FILTER_START]
  3564. +
  3565. + str r2, [r1, #L2X0_AUX_CTRL]
  3566. + mov r9, #L2X0_CTRL_EN
  3567. + str r9, [r1, #L2X0_CTRL]
  3568. + mov pc, lr
  3569. +ENDPROC(l2c310_early_resume)
  3570. +
  3571. + .align
  3572. +1: .long l2x0_saved_regs - .
  3573. diff -Nur linux-3.15-rc6.orig/arch/arm/mm/Makefile linux-3.15-rc6/arch/arm/mm/Makefile
  3574. --- linux-3.15-rc6.orig/arch/arm/mm/Makefile 2014-05-21 23:42:02.000000000 +0200
  3575. +++ linux-3.15-rc6/arch/arm/mm/Makefile 2014-05-23 11:26:48.284939966 +0200
  3576. @@ -95,7 +95,8 @@
  3577. AFLAGS_proc-v6.o :=-Wa,-march=armv6
  3578. AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
  3579. +obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
  3580. obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
  3581. -obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
  3582. +obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
  3583. obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
  3584. obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
  3585. diff -Nur linux-3.15-rc6.orig/arch/arm/plat-samsung/s5p-sleep.S linux-3.15-rc6/arch/arm/plat-samsung/s5p-sleep.S
  3586. --- linux-3.15-rc6.orig/arch/arm/plat-samsung/s5p-sleep.S 2014-05-21 23:42:02.000000000 +0200
  3587. +++ linux-3.15-rc6/arch/arm/plat-samsung/s5p-sleep.S 2014-05-23 11:26:48.284939966 +0200
  3588. @@ -22,7 +22,6 @@
  3589. */
  3590. #include <linux/linkage.h>
  3591. -#include <asm/asm-offsets.h>
  3592. .data
  3593. .align
  3594. diff -Nur linux-3.15-rc6.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt linux-3.15-rc6/Documentation/devicetree/bindings/leds/leds-pwm.txt
  3595. --- linux-3.15-rc6.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt 2014-05-21 23:42:02.000000000 +0200
  3596. +++ linux-3.15-rc6/Documentation/devicetree/bindings/leds/leds-pwm.txt 2014-05-23 11:26:48.284939966 +0200
  3597. @@ -13,6 +13,8 @@
  3598. For the pwms and pwm-names property please refer to:
  3599. Documentation/devicetree/bindings/pwm/pwm.txt
  3600. - max-brightness : Maximum brightness possible for the LED
  3601. +- active-low : (optional) For PWMs where the LED is wired to the supply
  3602. + rather than to ground.
  3603. - label : (optional)
  3604. see Documentation/devicetree/bindings/leds/common.txt
  3605. - linux,default-trigger : (optional)
  3606. diff -Nur linux-3.15-rc6.orig/Documentation/devicetree/bindings/mmc/mmc.txt linux-3.15-rc6/Documentation/devicetree/bindings/mmc/mmc.txt
  3607. --- linux-3.15-rc6.orig/Documentation/devicetree/bindings/mmc/mmc.txt 2014-05-21 23:42:02.000000000 +0200
  3608. +++ linux-3.15-rc6/Documentation/devicetree/bindings/mmc/mmc.txt 2014-05-23 11:26:48.284939966 +0200
  3609. @@ -5,6 +5,8 @@
  3610. Interpreted by the OF core:
  3611. - reg: Registers location and length.
  3612. - interrupts: Interrupts used by the MMC controller.
  3613. +- clocks: Clocks needed for the host controller, if any.
  3614. +- clock-names: Goes with clocks above.
  3615. Card detection:
  3616. If no property below is supplied, host native card detect is used.
  3617. @@ -39,6 +41,15 @@
  3618. - mmc-hs200-1_8v: eMMC HS200 mode(1.8V I/O) is supported
  3619. - mmc-hs200-1_2v: eMMC HS200 mode(1.2V I/O) is supported
  3620. +Card power and reset control:
  3621. +The following properties can be specified for cases where the MMC
  3622. +peripheral needs additional reset, regulator and clock lines. It is for
  3623. +example common for WiFi/BT adapters to have these separate from the main
  3624. +MMC bus:
  3625. + - card-reset-gpios: Specify GPIOs for card reset (reset active low)
  3626. + - card-external-vcc-supply: Regulator to drive (independent) card VCC
  3627. + - clock with name "card_ext_clock": External clock provided to the card
  3628. +
  3629. *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
  3630. polarity properties, we have to fix the meaning of the "normal" and "inverted"
  3631. line levels. We choose to follow the SDHCI standard, which specifies both those
  3632. diff -Nur linux-3.15-rc6.orig/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt linux-3.15-rc6/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt
  3633. --- linux-3.15-rc6.orig/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt 2014-05-21 23:42:02.000000000 +0200
  3634. +++ linux-3.15-rc6/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt 2014-05-23 11:26:48.288939979 +0200
  3635. @@ -60,7 +60,8 @@
  3636. - compatible: Should be "fsl,imx-parallel-display"
  3637. Optional properties:
  3638. - interface_pix_fmt: How this display is connected to the
  3639. - display interface. Currently supported types: "rgb24", "rgb565", "bgr666"
  3640. + display interface. Currently supported types: "rgb24", "rgb565", "bgr666",
  3641. + "rgb666"
  3642. - edid: verbatim EDID data block describing attached display.
  3643. - ddc: phandle describing the i2c bus handling the display data
  3644. channel
  3645. diff -Nur linux-3.15-rc6.orig/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml linux-3.15-rc6/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
  3646. --- linux-3.15-rc6.orig/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml 2014-05-21 23:42:02.000000000 +0200
  3647. +++ linux-3.15-rc6/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml 2014-05-23 11:26:48.288939979 +0200
  3648. @@ -279,6 +279,45 @@
  3649. <entry></entry>
  3650. <entry></entry>
  3651. </row>
  3652. + <row id="V4L2-PIX-FMT-RGB666">
  3653. + <entry><constant>V4L2_PIX_FMT_RGB666</constant></entry>
  3654. + <entry>'RGBH'</entry>
  3655. + <entry></entry>
  3656. + <entry>r<subscript>5</subscript></entry>
  3657. + <entry>r<subscript>4</subscript></entry>
  3658. + <entry>r<subscript>3</subscript></entry>
  3659. + <entry>r<subscript>2</subscript></entry>
  3660. + <entry>r<subscript>1</subscript></entry>
  3661. + <entry>r<subscript>0</subscript></entry>
  3662. + <entry>g<subscript>5</subscript></entry>
  3663. + <entry>g<subscript>4</subscript></entry>
  3664. + <entry></entry>
  3665. + <entry>g<subscript>3</subscript></entry>
  3666. + <entry>g<subscript>2</subscript></entry>
  3667. + <entry>g<subscript>1</subscript></entry>
  3668. + <entry>g<subscript>0</subscript></entry>
  3669. + <entry>b<subscript>5</subscript></entry>
  3670. + <entry>b<subscript>4</subscript></entry>
  3671. + <entry>b<subscript>3</subscript></entry>
  3672. + <entry>b<subscript>2</subscript></entry>
  3673. + <entry></entry>
  3674. + <entry>b<subscript>1</subscript></entry>
  3675. + <entry>b<subscript>0</subscript></entry>
  3676. + <entry></entry>
  3677. + <entry></entry>
  3678. + <entry></entry>
  3679. + <entry></entry>
  3680. + <entry></entry>
  3681. + <entry></entry>
  3682. + <entry></entry>
  3683. + <entry></entry>
  3684. + <entry></entry>
  3685. + <entry></entry>
  3686. + <entry></entry>
  3687. + <entry></entry>
  3688. + <entry></entry>
  3689. + <entry></entry>
  3690. + </row>
  3691. <row id="V4L2-PIX-FMT-BGR24">
  3692. <entry><constant>V4L2_PIX_FMT_BGR24</constant></entry>
  3693. <entry>'BGR3'</entry>
  3694. diff -Nur linux-3.15-rc6.orig/drivers/ata/ahci_imx.c linux-3.15-rc6/drivers/ata/ahci_imx.c
  3695. --- linux-3.15-rc6.orig/drivers/ata/ahci_imx.c 2014-05-21 23:42:02.000000000 +0200
  3696. +++ linux-3.15-rc6/drivers/ata/ahci_imx.c 2014-05-23 11:26:48.288939979 +0200
  3697. @@ -62,6 +62,7 @@
  3698. struct regmap *gpr;
  3699. bool no_device;
  3700. bool first_time;
  3701. + u32 phy_params;
  3702. };
  3703. static int ahci_imx_hotplug;
  3704. @@ -246,14 +247,7 @@
  3705. IMX6Q_GPR13_SATA_TX_LVL_MASK |
  3706. IMX6Q_GPR13_SATA_MPLL_CLK_EN |
  3707. IMX6Q_GPR13_SATA_TX_EDGE_RATE,
  3708. - IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
  3709. - IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
  3710. - IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
  3711. - IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
  3712. - IMX6Q_GPR13_SATA_MPLL_SS_EN |
  3713. - IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
  3714. - IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
  3715. - IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
  3716. + imxpriv->phy_params);
  3717. regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
  3718. IMX6Q_GPR13_SATA_MPLL_CLK_EN,
  3719. IMX6Q_GPR13_SATA_MPLL_CLK_EN);
  3720. @@ -324,6 +318,10 @@
  3721. writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
  3722. imx_sata_disable(hpriv);
  3723. imxpriv->no_device = true;
  3724. +
  3725. + dev_info(ap->dev, "no device found, disabling link.\n");
  3726. + dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX
  3727. + ".hotplug=1 to enable hotplug\n");
  3728. }
  3729. static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
  3730. @@ -364,6 +362,165 @@
  3731. };
  3732. MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
  3733. +struct reg_value {
  3734. + u32 of_value;
  3735. + u32 reg_value;
  3736. +};
  3737. +
  3738. +struct reg_property {
  3739. + const char *name;
  3740. + const struct reg_value *values;
  3741. + size_t num_values;
  3742. + u32 def_value;
  3743. + u32 set_value;
  3744. +};
  3745. +
  3746. +static const struct reg_value gpr13_tx_level[] = {
  3747. + { 937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
  3748. + { 947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
  3749. + { 957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
  3750. + { 966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
  3751. + { 976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
  3752. + { 986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
  3753. + { 996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
  3754. + { 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
  3755. + { 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
  3756. + { 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
  3757. + { 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
  3758. + { 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
  3759. + { 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
  3760. + { 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
  3761. + { 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
  3762. + { 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
  3763. + { 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
  3764. + { 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
  3765. + { 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
  3766. + { 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
  3767. + { 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
  3768. + { 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
  3769. + { 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
  3770. + { 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
  3771. + { 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
  3772. + { 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
  3773. + { 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
  3774. + { 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
  3775. + { 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
  3776. + { 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
  3777. + { 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
  3778. + { 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
  3779. +};
  3780. +
  3781. +static const struct reg_value gpr13_tx_boost[] = {
  3782. + { 0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
  3783. + { 370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
  3784. + { 740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
  3785. + { 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
  3786. + { 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
  3787. + { 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
  3788. + { 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
  3789. + { 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
  3790. + { 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
  3791. + { 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
  3792. + { 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
  3793. + { 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
  3794. + { 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
  3795. + { 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
  3796. + { 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
  3797. + { 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
  3798. +};
  3799. +
  3800. +static const struct reg_value gpr13_tx_atten[] = {
  3801. + { 8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
  3802. + { 9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
  3803. + { 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
  3804. + { 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
  3805. + { 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
  3806. + { 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
  3807. +};
  3808. +
  3809. +static const struct reg_value gpr13_rx_eq[] = {
  3810. + { 500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
  3811. + { 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
  3812. + { 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
  3813. + { 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
  3814. + { 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
  3815. + { 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
  3816. + { 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
  3817. + { 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
  3818. +};
  3819. +
  3820. +static const struct reg_property gpr13_props[] = {
  3821. + {
  3822. + .name = "fsl,transmit-level-mV",
  3823. + .values = gpr13_tx_level,
  3824. + .num_values = ARRAY_SIZE(gpr13_tx_level),
  3825. + .def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
  3826. + }, {
  3827. + .name = "fsl,transmit-boost-mdB",
  3828. + .values = gpr13_tx_boost,
  3829. + .num_values = ARRAY_SIZE(gpr13_tx_boost),
  3830. + .def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
  3831. + }, {
  3832. + .name = "fsl,transmit-atten-16ths",
  3833. + .values = gpr13_tx_atten,
  3834. + .num_values = ARRAY_SIZE(gpr13_tx_atten),
  3835. + .def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
  3836. + }, {
  3837. + .name = "fsl,receive-eq-mdB",
  3838. + .values = gpr13_rx_eq,
  3839. + .num_values = ARRAY_SIZE(gpr13_rx_eq),
  3840. + .def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
  3841. + }, {
  3842. + .name = "fsl,no-spread-spectrum",
  3843. + .def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
  3844. + .set_value = 0,
  3845. + },
  3846. +};
  3847. +
  3848. +static u32 imx_ahci_parse_props(struct device *dev,
  3849. + const struct reg_property *prop, size_t num)
  3850. +{
  3851. + struct device_node *np = dev->of_node;
  3852. + u32 reg_value = 0;
  3853. + int i, j;
  3854. +
  3855. + for (i = 0; i < num; i++, prop++) {
  3856. + u32 of_val;
  3857. +
  3858. + if (prop->num_values == 0) {
  3859. + if (of_property_read_bool(np, prop->name))
  3860. + reg_value |= prop->set_value;
  3861. + else
  3862. + reg_value |= prop->def_value;
  3863. + continue;
  3864. + }
  3865. +
  3866. + if (of_property_read_u32(np, prop->name, &of_val)) {
  3867. + dev_info(dev, "%s not specified, using %08x\n",
  3868. + prop->name, prop->def_value);
  3869. + reg_value |= prop->def_value;
  3870. + continue;
  3871. + }
  3872. +
  3873. + for (j = 0; j < prop->num_values; j++) {
  3874. + if (prop->values[j].of_value == of_val) {
  3875. + dev_info(dev, "%s value %u, using %08x\n",
  3876. + prop->name, of_val, prop->values[j].reg_value);
  3877. + reg_value |= prop->values[j].reg_value;
  3878. + break;
  3879. + }
  3880. + }
  3881. +
  3882. + if (j == prop->num_values) {
  3883. + dev_err(dev, "DT property %s is not a valid value\n",
  3884. + prop->name);
  3885. + reg_value |= prop->def_value;
  3886. + }
  3887. + }
  3888. +
  3889. + return reg_value;
  3890. +}
  3891. +
  3892. static int imx_ahci_probe(struct platform_device *pdev)
  3893. {
  3894. struct device *dev = &pdev->dev;
  3895. @@ -392,6 +549,8 @@
  3896. }
  3897. if (imxpriv->type == AHCI_IMX6Q) {
  3898. + u32 reg_value;
  3899. +
  3900. imxpriv->gpr = syscon_regmap_lookup_by_compatible(
  3901. "fsl,imx6q-iomuxc-gpr");
  3902. if (IS_ERR(imxpriv->gpr)) {
  3903. @@ -399,6 +558,15 @@
  3904. "failed to find fsl,imx6q-iomux-gpr regmap\n");
  3905. return PTR_ERR(imxpriv->gpr);
  3906. }
  3907. +
  3908. + reg_value = imx_ahci_parse_props(dev, gpr13_props,
  3909. + ARRAY_SIZE(gpr13_props));
  3910. +
  3911. + imxpriv->phy_params =
  3912. + IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
  3913. + IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
  3914. + IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
  3915. + reg_value;
  3916. }
  3917. hpriv = ahci_platform_get_resources(pdev);
  3918. diff -Nur linux-3.15-rc6.orig/drivers/cec/cec-dev.c linux-3.15-rc6/drivers/cec/cec-dev.c
  3919. --- linux-3.15-rc6.orig/drivers/cec/cec-dev.c 1970-01-01 01:00:00.000000000 +0100
  3920. +++ linux-3.15-rc6/drivers/cec/cec-dev.c 2014-05-23 11:26:48.296940005 +0200
  3921. @@ -0,0 +1,384 @@
  3922. +/*
  3923. + * HDMI Consumer Electronics Control
  3924. + *
  3925. + * This provides the user API for communication with HDMI CEC compliant
  3926. + * devices in kernel drivers, and is based upon the protocol developed
  3927. + * by Freescale for their i.MX SoCs.
  3928. + *
  3929. + * This program is free software; you can redistribute it and/or modify
  3930. + * it under the terms of the GNU General Public License version 2 as
  3931. + * published by the Free Software Foundation.
  3932. + */
  3933. +#include <linux/cec-dev.h>
  3934. +#include <linux/device.h>
  3935. +#include <linux/fs.h>
  3936. +#include <linux/module.h>
  3937. +#include <linux/poll.h>
  3938. +#include <linux/sched.h>
  3939. +#include <linux/slab.h>
  3940. +
  3941. +struct cec_event {
  3942. + struct cec_user_event usr;
  3943. + struct list_head node;
  3944. +};
  3945. +
  3946. +static struct class *cec_class;
  3947. +static int cec_major;
  3948. +
  3949. +static void cec_dev_send_message(struct cec_dev *cec_dev, u8 *msg,
  3950. + size_t count)
  3951. +{
  3952. + unsigned long flags;
  3953. +
  3954. + spin_lock_irqsave(&cec_dev->lock, flags);
  3955. + cec_dev->retries = 5;
  3956. + cec_dev->write_busy = 1;
  3957. + cec_dev->send_message(cec_dev, msg, count);
  3958. + spin_unlock_irqrestore(&cec_dev->lock, flags);
  3959. +}
  3960. +
  3961. +void cec_dev_event(struct cec_dev *cec_dev, int type, u8 *msg, size_t len)
  3962. +{
  3963. + struct cec_event *event;
  3964. + unsigned long flags;
  3965. +
  3966. + event = kzalloc(sizeof(*event), GFP_ATOMIC);
  3967. + if (event) {
  3968. + event->usr.event_type = type;
  3969. + event->usr.msg_len = len;
  3970. + if (msg)
  3971. + memcpy(event->usr.msg, msg, len);
  3972. +
  3973. + spin_lock_irqsave(&cec_dev->lock, flags);
  3974. + list_add_tail(&event->node, &cec_dev->events);
  3975. + spin_unlock_irqrestore(&cec_dev->lock, flags);
  3976. + wake_up(&cec_dev->waitq);
  3977. + }
  3978. +}
  3979. +EXPORT_SYMBOL_GPL(cec_dev_event);
  3980. +
  3981. +static int cec_dev_lock_write(struct cec_dev *cec_dev, struct file *file)
  3982. + __acquires(cec_dev->mutex)
  3983. +{
  3984. + int ret;
  3985. +
  3986. + do {
  3987. + if (file->f_flags & O_NONBLOCK) {
  3988. + if (cec_dev->write_busy)
  3989. + return -EAGAIN;
  3990. + } else {
  3991. + ret = wait_event_interruptible(cec_dev->waitq,
  3992. + !cec_dev->write_busy);
  3993. + if (ret)
  3994. + break;
  3995. + }
  3996. +
  3997. + ret = mutex_lock_interruptible(&cec_dev->mutex);
  3998. + if (ret)
  3999. + break;
  4000. +
  4001. + if (!cec_dev->write_busy)
  4002. + break;
  4003. +
  4004. + mutex_unlock(&cec_dev->mutex);
  4005. + } while (1);
  4006. +
  4007. + return ret;
  4008. +}
  4009. +
  4010. +static ssize_t cec_dev_read(struct file *file, char __user *buf,
  4011. + size_t count, loff_t *ppos)
  4012. +{
  4013. + struct cec_dev *cec_dev = file->private_data;
  4014. + ssize_t ret;
  4015. +
  4016. + if (count > sizeof(struct cec_user_event))
  4017. + count = sizeof(struct cec_user_event);
  4018. +
  4019. + if (!access_ok(VERIFY_WRITE, buf, count))
  4020. + return -EFAULT;
  4021. +
  4022. + do {
  4023. + struct cec_event *event = NULL;
  4024. + unsigned long flags;
  4025. +
  4026. + spin_lock_irqsave(&cec_dev->lock, flags);
  4027. + if (!list_empty(&cec_dev->events)) {
  4028. + event = list_first_entry(&cec_dev->events,
  4029. + struct cec_event, node);
  4030. + list_del(&event->node);
  4031. + }
  4032. + spin_unlock_irqrestore(&cec_dev->lock, flags);
  4033. +
  4034. + if (event) {
  4035. + ret = __copy_to_user(buf, &event->usr, count) ?
  4036. + -EFAULT : count;
  4037. + kfree(event);
  4038. + break;
  4039. + }
  4040. +
  4041. + if (file->f_flags & O_NONBLOCK) {
  4042. + ret = -EAGAIN;
  4043. + break;
  4044. + }
  4045. +
  4046. + ret = wait_event_interruptible(cec_dev->waitq,
  4047. + !list_empty(&cec_dev->events));
  4048. + if (ret)
  4049. + break;
  4050. + } while (1);
  4051. +
  4052. + return ret;
  4053. +}
  4054. +
  4055. +static ssize_t cec_dev_write(struct file *file, const char __user *buf,
  4056. + size_t count, loff_t *ppos)
  4057. +{
  4058. + struct cec_dev *cec_dev = file->private_data;
  4059. + u8 msg[MAX_MESSAGE_LEN];
  4060. + int ret;
  4061. +
  4062. + if (count > sizeof(msg))
  4063. + return -E2BIG;
  4064. +
  4065. + if (copy_from_user(msg, buf, count))
  4066. + return -EFAULT;
  4067. +
  4068. + ret = cec_dev_lock_write(cec_dev, file);
  4069. + if (ret)
  4070. + return ret;
  4071. +
  4072. + cec_dev_send_message(cec_dev, msg, count);
  4073. +
  4074. + mutex_unlock(&cec_dev->mutex);
  4075. +
  4076. + return count;
  4077. +}
  4078. +
  4079. +static long cec_dev_ioctl(struct file *file, u_int cmd, unsigned long arg)
  4080. +{
  4081. + struct cec_dev *cec_dev = file->private_data;
  4082. + int ret;
  4083. +
  4084. + switch (cmd) {
  4085. + case HDMICEC_IOC_O_SETLOGICALADDRESS:
  4086. + case HDMICEC_IOC_SETLOGICALADDRESS:
  4087. + if (arg > 15) {
  4088. + ret = -EINVAL;
  4089. + break;
  4090. + }
  4091. +
  4092. + ret = cec_dev_lock_write(cec_dev, file);
  4093. + if (ret == 0) {
  4094. + unsigned char msg[1];
  4095. +
  4096. + cec_dev->addresses = BIT(arg);
  4097. + cec_dev->set_address(cec_dev, cec_dev->addresses);
  4098. +
  4099. + /*
  4100. + * Send a ping message with the source and destination
  4101. + * set to our address; the result indicates whether another
  4102. + * unit has chosen our address simultaneously.
  4103. + */
  4104. + msg[0] = arg << 4 | arg;
  4105. + cec_dev_send_message(cec_dev, msg, sizeof(msg));
  4106. + mutex_unlock(&cec_dev->mutex);
  4107. + }
  4108. + break;
  4109. +
  4110. + case HDMICEC_IOC_STARTDEVICE:
  4111. + ret = mutex_lock_interruptible(&cec_dev->mutex);
  4112. + if (ret == 0) {
  4113. + cec_dev->addresses = BIT(15);
  4114. + cec_dev->set_address(cec_dev, cec_dev->addresses);
  4115. + mutex_unlock(&cec_dev->mutex);
  4116. + }
  4117. + break;
  4118. +
  4119. + case HDMICEC_IOC_STOPDEVICE:
  4120. + ret = 0;
  4121. + break;
  4122. +
  4123. + case HDMICEC_IOC_GETPHYADDRESS:
  4124. + ret = put_user(cec_dev->physical, (u16 __user *)arg);
  4125. + ret = -ENOIOCTLCMD;
  4126. + break;
  4127. +
  4128. + default:
  4129. + ret = -ENOIOCTLCMD;
  4130. + break;
  4131. + }
  4132. +
  4133. + return ret;
  4134. +}
  4135. +
  4136. +static unsigned cec_dev_poll(struct file *file, poll_table *wait)
  4137. +{
  4138. + struct cec_dev *cec_dev = file->private_data;
  4139. + unsigned mask = 0;
  4140. +
  4141. + poll_wait(file, &cec_dev->waitq, wait);
  4142. +
  4143. + if (cec_dev->write_busy == 0)
  4144. + mask |= POLLOUT | POLLWRNORM;
  4145. + if (!list_empty(&cec_dev->events))
  4146. + mask |= POLLIN | POLLRDNORM;
  4147. +
  4148. + return mask;
  4149. +}
  4150. +
  4151. +static int cec_dev_release(struct inode *inode, struct file *file)
  4152. +{
  4153. + struct cec_dev *cec_dev = file->private_data;
  4154. +
  4155. + mutex_lock(&cec_dev->mutex);
  4156. + if (cec_dev->users >= 1)
  4157. + cec_dev->users -= 1;
  4158. + if (cec_dev->users == 0) {
  4159. + /*
  4160. + * Wait for any write to complete before shutting down.
  4161. + * A message should complete in a maximum of 2.75ms *
  4162. + * 160 bits + 4.7ms, or 444.7ms. Let's call that 500ms.
  4163. + * If we time out, shutdown anyway.
  4164. + */
  4165. + wait_event_timeout(cec_dev->waitq, !cec_dev->write_busy,
  4166. + msecs_to_jiffies(500));
  4167. +
  4168. + cec_dev->release(cec_dev);
  4169. +
  4170. + while (!list_empty(&cec_dev->events)) {
  4171. + struct cec_event *event;
  4172. +
  4173. + event = list_first_entry(&cec_dev->events,
  4174. + struct cec_event, node);
  4175. + list_del(&event->node);
  4176. + kfree(event);
  4177. + }
  4178. + }
  4179. + mutex_unlock(&cec_dev->mutex);
  4180. + return 0;
  4181. +}
  4182. +
  4183. +static int cec_dev_open(struct inode *inode, struct file *file)
  4184. +{
  4185. + struct cec_dev *cec_dev = container_of(inode->i_cdev, struct cec_dev,
  4186. + cdev);
  4187. + int ret = 0;
  4188. +
  4189. + nonseekable_open(inode, file);
  4190. +
  4191. + file->private_data = cec_dev;
  4192. +
  4193. + ret = mutex_lock_interruptible(&cec_dev->mutex);
  4194. + if (ret)
  4195. + return ret;
  4196. +
  4197. + if (cec_dev->users++ == 0) {
  4198. + cec_dev->addresses = BIT(15);
  4199. +
  4200. + ret = cec_dev->open(cec_dev);
  4201. + if (ret < 0)
  4202. + cec_dev->users = 0;
  4203. + }
  4204. + mutex_unlock(&cec_dev->mutex);
  4205. +
  4206. + return ret;
  4207. +}
  4208. +
  4209. +static const struct file_operations hdmi_cec_fops = {
  4210. + .owner = THIS_MODULE,
  4211. + .read = cec_dev_read,
  4212. + .write = cec_dev_write,
  4213. + .open = cec_dev_open,
  4214. + .unlocked_ioctl = cec_dev_ioctl,
  4215. + .release = cec_dev_release,
  4216. + .poll = cec_dev_poll,
  4217. +};
  4218. +
  4219. +void cec_dev_init(struct cec_dev *cec_dev, struct module *module)
  4220. +{
  4221. + cec_dev->devn = MKDEV(cec_major, 0);
  4222. +
  4223. + INIT_LIST_HEAD(&cec_dev->events);
  4224. + init_waitqueue_head(&cec_dev->waitq);
  4225. + spin_lock_init(&cec_dev->lock);
  4226. + mutex_init(&cec_dev->mutex);
  4227. +
  4228. + cec_dev->addresses = BIT(15);
  4229. +
  4230. + cdev_init(&cec_dev->cdev, &hdmi_cec_fops);
  4231. + cec_dev->cdev.owner = module;
  4232. +}
  4233. +EXPORT_SYMBOL_GPL(cec_dev_init);
  4234. +
  4235. +int cec_dev_add(struct cec_dev *cec_dev, struct device *dev, const char *name)
  4236. +{
  4237. + struct device *cd;
  4238. + int ret;
  4239. +
  4240. + ret = cdev_add(&cec_dev->cdev, cec_dev->devn, 1);
  4241. + if (ret < 0)
  4242. + goto err_cdev;
  4243. +
  4244. + cd = device_create(cec_class, dev, cec_dev->devn, NULL, name);
  4245. + if (IS_ERR(cd)) {
  4246. + ret = PTR_ERR(cd);
  4247. + dev_err(dev, "can't create device: %d\n", ret);
  4248. + goto err_dev;
  4249. + }
  4250. +
  4251. + return 0;
  4252. +
  4253. + err_dev:
  4254. + cdev_del(&cec_dev->cdev);
  4255. + err_cdev:
  4256. + return ret;
  4257. +}
  4258. +EXPORT_SYMBOL_GPL(cec_dev_add);
  4259. +
  4260. +void cec_dev_remove(struct cec_dev *cec_dev)
  4261. +{
  4262. + device_destroy(cec_class, cec_dev->devn);
  4263. + cdev_del(&cec_dev->cdev);
  4264. +}
  4265. +EXPORT_SYMBOL_GPL(cec_dev_remove);
  4266. +
  4267. +static int cec_init(void)
  4268. +{
  4269. + dev_t dev;
  4270. + int ret;
  4271. +
  4272. + cec_class = class_create(THIS_MODULE, "hdmi-cec");
  4273. + if (IS_ERR(cec_class)) {
  4274. + ret = PTR_ERR(cec_class);
  4275. + pr_err("cec: can't create cec class: %d\n", ret);
  4276. + goto err_class;
  4277. + }
  4278. +
  4279. + ret = alloc_chrdev_region(&dev, 0, 1, "hdmi-cec");
  4280. + if (ret) {
  4281. + pr_err("cec: can't create character devices: %d\n", ret);
  4282. + goto err_chrdev;
  4283. + }
  4284. +
  4285. + cec_major = MAJOR(dev);
  4286. +
  4287. + return 0;
  4288. +
  4289. + err_chrdev:
  4290. + class_destroy(cec_class);
  4291. + err_class:
  4292. + return ret;
  4293. +}
  4294. +subsys_initcall(cec_init);
  4295. +
  4296. +static void cec_exit(void)
  4297. +{
  4298. + unregister_chrdev_region(MKDEV(cec_major, 0), 1);
  4299. + class_destroy(cec_class);
  4300. +}
  4301. +module_exit(cec_exit);
  4302. +
  4303. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  4304. +MODULE_DESCRIPTION("Generic HDMI CEC driver");
  4305. +MODULE_LICENSE("GPL");
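
A hypothetical user-space sketch of the character-device API implemented above; the ioctl and structure names come from the driver code, but the device node name and the availability of <linux/cec-dev.h> to user space are assumptions. It claims a logical address, writes a one-byte CEC ping frame, then reads an event back:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec-dev.h>      /* HDMICEC_IOC_*, struct cec_user_event (assumed exported) */

int main(void)
{
        struct cec_user_event ev;
        unsigned char ping[1];
        int fd = open("/dev/hdmi-cec", O_RDWR); /* node name depends on the registering driver */

        if (fd < 0)
                return 1;

        ioctl(fd, HDMICEC_IOC_STARTDEVICE, 0);
        ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4);    /* e.g. CEC "playback device 1" */

        ping[0] = (4 << 4) | 0;                 /* src 4 -> dst 0 (TV): a bare ping */
        write(fd, ping, sizeof(ping));

        if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                printf("event %d, %d message byte(s)\n",
                       (int)ev.event_type, (int)ev.msg_len);

        close(fd);
        return 0;
}
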
  4306. diff -Nur linux-3.15-rc6.orig/drivers/cec/Kconfig linux-3.15-rc6/drivers/cec/Kconfig
  4307. --- linux-3.15-rc6.orig/drivers/cec/Kconfig 1970-01-01 01:00:00.000000000 +0100
  4308. +++ linux-3.15-rc6/drivers/cec/Kconfig 2014-05-23 11:26:48.296940005 +0200
  4309. @@ -0,0 +1,14 @@
  4310. +#
  4311. +# Consumer Electronics Control support
  4312. +#
  4313. +
  4314. +menu "Consumer Electronics Control devices"
  4315. +
  4316. +config CEC
  4317. + bool
  4318. +
  4319. +config HDMI_CEC_CORE
  4320. + tristate
  4321. + select CEC
  4322. +
  4323. +endmenu
  4324. diff -Nur linux-3.15-rc6.orig/drivers/cec/Makefile linux-3.15-rc6/drivers/cec/Makefile
  4325. --- linux-3.15-rc6.orig/drivers/cec/Makefile 1970-01-01 01:00:00.000000000 +0100
  4326. +++ linux-3.15-rc6/drivers/cec/Makefile 2014-05-23 11:26:48.296940005 +0200
  4327. @@ -0,0 +1 @@
  4328. +obj-$(CONFIG_HDMI_CEC_CORE) += cec-dev.o
  4329. diff -Nur linux-3.15-rc6.orig/drivers/gpu/drm/drm_crtc_helper.c linux-3.15-rc6/drivers/gpu/drm/drm_crtc_helper.c
  4330. --- linux-3.15-rc6.orig/drivers/gpu/drm/drm_crtc_helper.c 2014-05-21 23:42:02.000000000 +0200
  4331. +++ linux-3.15-rc6/drivers/gpu/drm/drm_crtc_helper.c 2014-05-23 11:26:48.296940005 +0200
  4332. @@ -140,16 +140,10 @@
  4333. static void __drm_helper_disable_unused_functions(struct drm_device *dev)
  4334. {
  4335. struct drm_encoder *encoder;
  4336. - struct drm_connector *connector;
  4337. struct drm_crtc *crtc;
  4338. drm_warn_on_modeset_not_all_locked(dev);
  4339. - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
  4340. - if (!connector->encoder)
  4341. - continue;
  4342. - }
  4343. -
  4344. list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
  4345. if (!drm_helper_encoder_in_use(encoder)) {
  4346. drm_encoder_disable(encoder);
  4347. diff -Nur linux-3.15-rc6.orig/drivers/Kconfig linux-3.15-rc6/drivers/Kconfig
  4348. --- linux-3.15-rc6.orig/drivers/Kconfig 2014-05-21 23:42:02.000000000 +0200
  4349. +++ linux-3.15-rc6/drivers/Kconfig 2014-05-23 11:26:48.296940005 +0200
  4350. @@ -174,4 +174,6 @@
  4351. source "drivers/mcb/Kconfig"
  4352. +source "drivers/cec/Kconfig"
  4353. +
  4354. endmenu
  4355. diff -Nur linux-3.15-rc6.orig/drivers/leds/leds-pwm.c linux-3.15-rc6/drivers/leds/leds-pwm.c
  4356. --- linux-3.15-rc6.orig/drivers/leds/leds-pwm.c 2014-05-21 23:42:02.000000000 +0200
  4357. +++ linux-3.15-rc6/drivers/leds/leds-pwm.c 2014-05-23 11:26:48.296940005 +0200
  4358. @@ -69,6 +69,10 @@
  4359. duty *= brightness;
  4360. do_div(duty, max);
  4361. +
  4362. + if (led_dat->active_low)
  4363. + duty = led_dat->period - duty;
  4364. +
  4365. led_dat->duty = duty;
  4366. if (led_dat->can_sleep)
  4367. @@ -92,55 +96,75 @@
  4368. }
  4369. }
  4370. -static int led_pwm_create_of(struct platform_device *pdev,
  4371. - struct led_pwm_priv *priv)
  4372. +static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
  4373. + struct led_pwm *led, struct device_node *child)
  4374. {
  4375. - struct device_node *child;
  4376. + struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
  4377. int ret;
  4378. - for_each_child_of_node(pdev->dev.of_node, child) {
  4379. - struct led_pwm_data *led_dat = &priv->leds[priv->num_leds];
  4380. + led_data->active_low = led->active_low;
  4381. + led_data->period = led->pwm_period_ns;
  4382. + led_data->cdev.name = led->name;
  4383. + led_data->cdev.default_trigger = led->default_trigger;
  4384. + led_data->cdev.brightness_set = led_pwm_set;
  4385. + led_data->cdev.brightness = LED_OFF;
  4386. + led_data->cdev.max_brightness = led->max_brightness;
  4387. + led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
  4388. +
  4389. + if (child)
  4390. + led_data->pwm = devm_of_pwm_get(dev, child, NULL);
  4391. + else
  4392. + led_data->pwm = devm_pwm_get(dev, led->name);
  4393. + if (IS_ERR(led_data->pwm)) {
  4394. + ret = PTR_ERR(led_data->pwm);
  4395. + dev_err(dev, "unable to request PWM for %s: %d\n",
  4396. + led->name, ret);
  4397. + return ret;
  4398. + }
  4399. - led_dat->cdev.name = of_get_property(child, "label",
  4400. - NULL) ? : child->name;
  4401. + if (child)
  4402. + led_data->period = pwm_get_period(led_data->pwm);
  4403. - led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL);
  4404. - if (IS_ERR(led_dat->pwm)) {
  4405. - dev_err(&pdev->dev, "unable to request PWM for %s\n",
  4406. - led_dat->cdev.name);
  4407. - ret = PTR_ERR(led_dat->pwm);
  4408. - goto err;
  4409. - }
  4410. - /* Get the period from PWM core when n*/
  4411. - led_dat->period = pwm_get_period(led_dat->pwm);
  4412. + led_data->can_sleep = pwm_can_sleep(led_data->pwm);
  4413. + if (led_data->can_sleep)
  4414. + INIT_WORK(&led_data->work, led_pwm_work);
  4415. - led_dat->cdev.default_trigger = of_get_property(child,
  4416. + ret = led_classdev_register(dev, &led_data->cdev);
  4417. + if (ret == 0) {
  4418. + priv->num_leds++;
  4419. + } else {
  4420. + dev_err(dev, "failed to register PWM led for %s: %d\n",
  4421. + led->name, ret);
  4422. + }
  4423. +
  4424. + return ret;
  4425. +}
  4426. +
  4427. +static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv)
  4428. +{
  4429. + struct device_node *child;
  4430. + struct led_pwm led;
  4431. + int ret = 0;
  4432. +
  4433. + memset(&led, 0, sizeof(led));
  4434. +
  4435. + for_each_child_of_node(dev->of_node, child) {
  4436. + led.name = of_get_property(child, "label", NULL) ? :
  4437. + child->name;
  4438. +
  4439. + led.default_trigger = of_get_property(child,
  4440. "linux,default-trigger", NULL);
  4441. + led.active_low = of_property_read_bool(child, "active-low");
  4442. of_property_read_u32(child, "max-brightness",
  4443. - &led_dat->cdev.max_brightness);
  4444. + &led.max_brightness);
  4445. - led_dat->cdev.brightness_set = led_pwm_set;
  4446. - led_dat->cdev.brightness = LED_OFF;
  4447. - led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
  4448. -
  4449. - led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
  4450. - if (led_dat->can_sleep)
  4451. - INIT_WORK(&led_dat->work, led_pwm_work);
  4452. -
  4453. - ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
  4454. - if (ret < 0) {
  4455. - dev_err(&pdev->dev, "failed to register for %s\n",
  4456. - led_dat->cdev.name);
  4457. + ret = led_pwm_add(dev, priv, &led, child);
  4458. + if (ret) {
  4459. of_node_put(child);
  4460. - goto err;
  4461. + break;
  4462. }
  4463. - priv->num_leds++;
  4464. }
  4465. - return 0;
  4466. -err:
  4467. - led_pwm_cleanup(priv);
  4468. -
  4469. return ret;
  4470. }
  4471. @@ -166,51 +190,23 @@
  4472. if (pdata) {
  4473. for (i = 0; i < count; i++) {
  4474. - struct led_pwm *cur_led = &pdata->leds[i];
  4475. - struct led_pwm_data *led_dat = &priv->leds[i];
  4476. -
  4477. - led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name);
  4478. - if (IS_ERR(led_dat->pwm)) {
  4479. - ret = PTR_ERR(led_dat->pwm);
  4480. - dev_err(&pdev->dev,
  4481. - "unable to request PWM for %s\n",
  4482. - cur_led->name);
  4483. - goto err;
  4484. - }
  4485. -
  4486. - led_dat->cdev.name = cur_led->name;
  4487. - led_dat->cdev.default_trigger = cur_led->default_trigger;
  4488. - led_dat->active_low = cur_led->active_low;
  4489. - led_dat->period = cur_led->pwm_period_ns;
  4490. - led_dat->cdev.brightness_set = led_pwm_set;
  4491. - led_dat->cdev.brightness = LED_OFF;
  4492. - led_dat->cdev.max_brightness = cur_led->max_brightness;
  4493. - led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
  4494. -
  4495. - led_dat->can_sleep = pwm_can_sleep(led_dat->pwm);
  4496. - if (led_dat->can_sleep)
  4497. - INIT_WORK(&led_dat->work, led_pwm_work);
  4498. -
  4499. - ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
  4500. - if (ret < 0)
  4501. - goto err;
  4502. + ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i],
  4503. + NULL);
  4504. + if (ret)
  4505. + break;
  4506. }
  4507. - priv->num_leds = count;
  4508. } else {
  4509. - ret = led_pwm_create_of(pdev, priv);
  4510. - if (ret)
  4511. - return ret;
  4512. + ret = led_pwm_create_of(&pdev->dev, priv);
  4513. + }
  4514. +
  4515. + if (ret) {
  4516. + led_pwm_cleanup(priv);
  4517. + return ret;
  4518. }
  4519. platform_set_drvdata(pdev, priv);
  4520. return 0;
  4521. -
  4522. -err:
  4523. - priv->num_leds = i;
  4524. - led_pwm_cleanup(priv);
  4525. -
  4526. - return ret;
  4527. }
  4528. static int led_pwm_remove(struct platform_device *pdev)
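
A minimal sketch of the legacy platform-data path that the reworked probe now routes
through led_pwm_add() with a NULL device_node. The board and LED names, trigger and
period below are invented for illustration, and the sketch assumes a matching PWM
lookup table has been registered elsewhere so that devm_pwm_get(dev, led->name) can
resolve the channel.

#include <linux/kernel.h>
#include <linux/leds_pwm.h>
#include <linux/platform_device.h>

/* Hypothetical board file: one active-low PWM LED registered via platform
 * data; led_pwm_probe() hands each entry to led_pwm_add(dev, priv, led, NULL),
 * and the duty cycle is inverted in led_pwm_set() because active_low is set.
 */
static struct led_pwm board_pwm_leds[] = {
	{
		.name		 = "board:green:status",	/* made-up name */
		.default_trigger = "heartbeat",
		.active_low	 = 1,
		.max_brightness	 = 255,
		.pwm_period_ns	 = 1000000,
	},
};

static struct led_pwm_platform_data board_pwm_led_data = {
	.num_leds = ARRAY_SIZE(board_pwm_leds),
	.leds	  = board_pwm_leds,
};

static struct platform_device board_leds_pwm_device = {
	.name		   = "leds_pwm",
	.id		   = -1,
	.dev.platform_data = &board_pwm_led_data,
};
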
  4529. diff -Nur linux-3.15-rc6.orig/drivers/Makefile linux-3.15-rc6/drivers/Makefile
  4530. --- linux-3.15-rc6.orig/drivers/Makefile 2014-05-21 23:42:02.000000000 +0200
  4531. +++ linux-3.15-rc6/drivers/Makefile 2014-05-23 11:26:48.296940005 +0200
  4532. @@ -157,3 +157,4 @@
  4533. obj-$(CONFIG_FMC) += fmc/
  4534. obj-$(CONFIG_POWERCAP) += powercap/
  4535. obj-$(CONFIG_MCB) += mcb/
  4536. +obj-$(CONFIG_CEC) += cec/
  4537. diff -Nur linux-3.15-rc6.orig/drivers/mmc/core/core.c linux-3.15-rc6/drivers/mmc/core/core.c
  4538. --- linux-3.15-rc6.orig/drivers/mmc/core/core.c 2014-05-21 23:42:02.000000000 +0200
  4539. +++ linux-3.15-rc6/drivers/mmc/core/core.c 2014-05-23 11:26:48.300940018 +0200
  4540. @@ -13,11 +13,13 @@
  4541. #include <linux/module.h>
  4542. #include <linux/init.h>
  4543. #include <linux/interrupt.h>
  4544. +#include <linux/clk.h>
  4545. #include <linux/completion.h>
  4546. #include <linux/device.h>
  4547. #include <linux/delay.h>
  4548. #include <linux/pagemap.h>
  4549. #include <linux/err.h>
  4550. +#include <linux/gpio.h>
  4551. #include <linux/leds.h>
  4552. #include <linux/scatterlist.h>
  4553. #include <linux/log2.h>
  4554. @@ -1504,6 +1506,43 @@
  4555. mmc_host_clk_release(host);
  4556. }
  4557. +static void mmc_card_power_up(struct mmc_host *host)
  4558. +{
  4559. + int i;
  4560. + struct gpio_desc **gds = host->card_reset_gpios;
  4561. +
  4562. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  4563. + if (gds[i]) {
  4564. + dev_dbg(host->parent, "Asserting reset line %d\n", i);
  4565. + gpiod_set_value(gds[i], 1);
  4566. + }
  4567. + }
  4568. +
  4569. + if (host->card_regulator) {
  4570. + dev_dbg(host->parent, "Enabling external regulator\n");
  4571. + if (regulator_enable(host->card_regulator))
  4572. + dev_err(host->parent, "Failed to enable external regulator\n");
  4573. + }
  4574. +
  4575. + if (host->card_clk) {
  4576. + dev_dbg(host->parent, "Enabling external clock\n");
  4577. + clk_prepare_enable(host->card_clk);
  4578. + }
  4579. +
  4580. + /* 20ms delay to let clocks and power settle */
  4581. + mmc_delay(20);
  4582. +
  4583. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  4584. + if (gds[i]) {
  4585. + dev_dbg(host->parent, "Deasserting reset line %d\n", i);
  4586. + gpiod_set_value(gds[i], 0);
  4587. + }
  4588. + }
  4589. +
  4590. + /* 20ms delay after releasing reset */
  4591. + mmc_delay(20);
  4592. +}
  4593. +
  4594. /*
  4595. * Apply power to the MMC stack. This is a two-stage process.
  4596. * First, we enable power to the card without the clock running.
  4597. @@ -1520,6 +1559,9 @@
  4598. if (host->ios.power_mode == MMC_POWER_ON)
  4599. return;
  4600. + /* Power up the card/module first, if needed */
  4601. + mmc_card_power_up(host);
  4602. +
  4603. mmc_host_clk_hold(host);
  4604. host->ios.vdd = fls(ocr) - 1;
  4605. diff -Nur linux-3.15-rc6.orig/drivers/mmc/core/host.c linux-3.15-rc6/drivers/mmc/core/host.c
  4606. --- linux-3.15-rc6.orig/drivers/mmc/core/host.c 2014-05-21 23:42:02.000000000 +0200
  4607. +++ linux-3.15-rc6/drivers/mmc/core/host.c 2014-05-23 11:26:48.300940018 +0200
  4608. @@ -12,14 +12,18 @@
  4609. * MMC host class device management
  4610. */
  4611. +#include <linux/kernel.h>
  4612. +#include <linux/clk.h>
  4613. #include <linux/device.h>
  4614. #include <linux/err.h>
  4615. +#include <linux/gpio/consumer.h>
  4616. #include <linux/idr.h>
  4617. #include <linux/of.h>
  4618. #include <linux/of_gpio.h>
  4619. #include <linux/pagemap.h>
  4620. #include <linux/export.h>
  4621. #include <linux/leds.h>
  4622. +#include <linux/regulator/consumer.h>
  4623. #include <linux/slab.h>
  4624. #include <linux/suspend.h>
  4625. @@ -457,6 +461,66 @@
  4626. EXPORT_SYMBOL(mmc_of_parse);
  4627. +static int mmc_of_parse_child(struct mmc_host *host)
  4628. +{
  4629. + struct device_node *np;
  4630. + struct clk *clk;
  4631. + int i;
  4632. +
  4633. + if (!host->parent || !host->parent->of_node)
  4634. + return 0;
  4635. +
  4636. + np = host->parent->of_node;
  4637. +
  4638. + host->card_regulator = regulator_get(host->parent, "card-external-vcc");
  4639. + if (IS_ERR(host->card_regulator)) {
  4640. + if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER)
  4641. + return PTR_ERR(host->card_regulator);
  4642. + host->card_regulator = NULL;
  4643. + }
  4644. +
  4645. + /* Parse card power/reset/clock control */
  4646. + if (of_find_property(np, "card-reset-gpios", NULL)) {
  4647. + struct gpio_desc *gpd;
  4648. + int level = 0;
  4649. +
  4650. + /*
  4651. + * If the regulator is enabled, then we can hold the
  4652. + * card in reset with active-high resets. Otherwise,
  4653. + * hold the resets low.
  4654. + */
  4655. + if (host->card_regulator && regulator_is_enabled(host->card_regulator))
  4656. + level = 1;
  4657. +
  4658. + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
  4659. + gpd = devm_gpiod_get_index(host->parent, "card-reset", i);
  4660. + if (IS_ERR(gpd)) {
  4661. + if (PTR_ERR(gpd) == -EPROBE_DEFER)
  4662. + return PTR_ERR(gpd);
  4663. + break;
  4664. + }
  4665. + gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level);
  4666. + host->card_reset_gpios[i] = gpd;
  4667. + }
  4668. +
  4669. + gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios));
  4670. + if (!IS_ERR(gpd)) {
  4671. + dev_warn(host->parent, "More reset gpios than we can handle\n");
  4672. + gpiod_put(gpd);
  4673. + }
  4674. + }
  4675. +
  4676. + clk = of_clk_get_by_name(np, "card_ext_clock");
  4677. + if (IS_ERR(clk)) {
  4678. + if (PTR_ERR(clk) == -EPROBE_DEFER)
  4679. + return PTR_ERR(clk);
  4680. + clk = NULL;
  4681. + }
  4682. + host->card_clk = clk;
  4683. +
  4684. + return 0;
  4685. +}
  4686. +
  4687. /**
  4688. * mmc_alloc_host - initialise the per-host structure.
  4689. * @extra: sizeof private data structure
  4690. @@ -536,6 +600,10 @@
  4691. {
  4692. int err;
  4693. + err = mmc_of_parse_child(host);
  4694. + if (err)
  4695. + return err;
  4696. +
  4697. WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
  4698. !host->ops->enable_sdio_irq);
  4699. diff -Nur linux-3.15-rc6.orig/drivers/mmc/core/sdio_irq.c linux-3.15-rc6/drivers/mmc/core/sdio_irq.c
  4700. --- linux-3.15-rc6.orig/drivers/mmc/core/sdio_irq.c 2014-05-21 23:42:02.000000000 +0200
  4701. +++ linux-3.15-rc6/drivers/mmc/core/sdio_irq.c 2014-05-23 11:26:48.300940018 +0200
  4702. @@ -90,6 +90,15 @@
  4703. return ret;
  4704. }
  4705. +void sdio_run_irqs(struct mmc_host *host)
  4706. +{
  4707. + mmc_claim_host(host);
  4708. + host->sdio_irq_pending = true;
  4709. + process_sdio_pending_irqs(host);
  4710. + mmc_release_host(host);
  4711. +}
  4712. +EXPORT_SYMBOL_GPL(sdio_run_irqs);
  4713. +
  4714. static int sdio_irq_thread(void *_host)
  4715. {
  4716. struct mmc_host *host = _host;
  4717. @@ -189,14 +198,20 @@
  4718. WARN_ON(!host->claimed);
  4719. if (!host->sdio_irqs++) {
  4720. - atomic_set(&host->sdio_irq_thread_abort, 0);
  4721. - host->sdio_irq_thread =
  4722. - kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
  4723. - mmc_hostname(host));
  4724. - if (IS_ERR(host->sdio_irq_thread)) {
  4725. - int err = PTR_ERR(host->sdio_irq_thread);
  4726. - host->sdio_irqs--;
  4727. - return err;
  4728. + if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
  4729. + atomic_set(&host->sdio_irq_thread_abort, 0);
  4730. + host->sdio_irq_thread =
  4731. + kthread_run(sdio_irq_thread, host,
  4732. + "ksdioirqd/%s", mmc_hostname(host));
  4733. + if (IS_ERR(host->sdio_irq_thread)) {
  4734. + int err = PTR_ERR(host->sdio_irq_thread);
  4735. + host->sdio_irqs--;
  4736. + return err;
  4737. + }
  4738. + } else {
  4739. + mmc_host_clk_hold(host);
  4740. + host->ops->enable_sdio_irq(host, 1);
  4741. + mmc_host_clk_release(host);
  4742. }
  4743. }
  4744. @@ -211,8 +226,14 @@
  4745. BUG_ON(host->sdio_irqs < 1);
  4746. if (!--host->sdio_irqs) {
  4747. - atomic_set(&host->sdio_irq_thread_abort, 1);
  4748. - kthread_stop(host->sdio_irq_thread);
  4749. + if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) {
  4750. + atomic_set(&host->sdio_irq_thread_abort, 1);
  4751. + kthread_stop(host->sdio_irq_thread);
  4752. + } else {
  4753. + mmc_host_clk_hold(host);
  4754. + host->ops->enable_sdio_irq(host, 0);
  4755. + mmc_host_clk_release(host);
  4756. + }
  4757. }
  4758. return 0;
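
For host drivers, the new threadless path works roughly as sketched below (driver
names invented, and assuming the MMC_CAP2_SDIO_NOTHREAD flag and the sdio_run_irqs()
prototype added by the mmc header hunks of this patch): the host advertises the
capability so the core skips the ksdioirqd thread, and then calls sdio_run_irqs()
from its own (typically threaded) interrupt handler whenever the controller reports
a card interrupt. This mirrors what sdhci_thread_irq() does further down.

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

/* Hypothetical glue: the threaded half of the controller IRQ just lets the
 * core dispatch any pending SDIO function interrupts.
 */
static irqreturn_t foo_sdio_irq_thread(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	sdio_run_irqs(mmc);	/* claims the host, runs pending SDIO IRQs */
	return IRQ_HANDLED;
}

static void foo_host_init(struct mmc_host *mmc)
{
	/* Opt into the threadless SDIO IRQ handling added above */
	mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
}
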
  4759. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/dw_mmc.c linux-3.15-rc6/drivers/mmc/host/dw_mmc.c
  4760. --- linux-3.15-rc6.orig/drivers/mmc/host/dw_mmc.c 2014-05-21 23:42:02.000000000 +0200
  4761. +++ linux-3.15-rc6/drivers/mmc/host/dw_mmc.c 2014-05-23 11:26:48.300940018 +0200
  4762. @@ -2140,6 +2140,8 @@
  4763. if (!mmc)
  4764. return -ENOMEM;
  4765. + mmc_of_parse(mmc);
  4766. +
  4767. slot = mmc_priv(mmc);
  4768. slot->id = id;
  4769. slot->mmc = mmc;
  4770. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/Kconfig linux-3.15-rc6/drivers/mmc/host/Kconfig
  4771. --- linux-3.15-rc6.orig/drivers/mmc/host/Kconfig 2014-05-21 23:42:02.000000000 +0200
  4772. +++ linux-3.15-rc6/drivers/mmc/host/Kconfig 2014-05-23 11:26:48.300940018 +0200
  4773. @@ -25,8 +25,7 @@
  4774. If unsure, say N.
  4775. config MMC_SDHCI
  4776. - tristate "Secure Digital Host Controller Interface support"
  4777. - depends on HAS_DMA
  4778. + tristate
  4779. help
  4780. This selects the generic Secure Digital Host Controller Interface.
  4781. It is used by manufacturers such as Texas Instruments(R), Ricoh(R)
  4782. @@ -59,7 +58,8 @@
  4783. config MMC_SDHCI_PCI
  4784. tristate "SDHCI support on PCI bus"
  4785. - depends on MMC_SDHCI && PCI
  4786. + depends on PCI && HAS_DMA
  4787. + select MMC_SDHCI
  4788. help
  4789. This selects the PCI Secure Digital Host Controller Interface.
  4790. Most controllers found today are PCI devices.
  4791. @@ -83,7 +83,8 @@
  4792. config MMC_SDHCI_ACPI
  4793. tristate "SDHCI support for ACPI enumerated SDHCI controllers"
  4794. - depends on MMC_SDHCI && ACPI
  4795. + depends on ACPI && HAS_DMA
  4796. + select MMC_SDHCI
  4797. help
  4798. This selects support for ACPI enumerated SDHCI controllers,
  4799. identified by ACPI Compatibility ID PNP0D40 or specific
  4800. @@ -94,8 +95,8 @@
  4801. If unsure, say N.
  4802. config MMC_SDHCI_PLTFM
  4803. - tristate "SDHCI platform and OF driver helper"
  4804. - depends on MMC_SDHCI
  4805. + tristate
  4806. + select MMC_SDHCI
  4807. help
  4808. This selects the common helper functions support for Secure Digital
  4809. Host Controller Interface based platform and OF drivers.
  4810. @@ -106,8 +107,8 @@
  4811. config MMC_SDHCI_OF_ARASAN
  4812. tristate "SDHCI OF support for the Arasan SDHCI controllers"
  4813. - depends on MMC_SDHCI_PLTFM
  4814. - depends on OF
  4815. + depends on OF && HAS_DMA
  4816. + select MMC_SDHCI_PLTFM
  4817. help
  4818. This selects the Arasan Secure Digital Host Controller Interface
  4819. (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
  4820. @@ -118,9 +119,9 @@
  4821. config MMC_SDHCI_OF_ESDHC
  4822. tristate "SDHCI OF support for the Freescale eSDHC controller"
  4823. - depends on MMC_SDHCI_PLTFM
  4824. - depends on PPC_OF
  4825. + depends on PPC_OF && HAS_DMA
  4826. select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
  4827. + select MMC_SDHCI_PLTFM
  4828. help
  4829. This selects the Freescale eSDHC controller support.
  4830. @@ -130,9 +131,9 @@
  4831. config MMC_SDHCI_OF_HLWD
  4832. tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
  4833. - depends on MMC_SDHCI_PLTFM
  4834. - depends on PPC_OF
  4835. + depends on PPC_OF && HAS_DMA
  4836. select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
  4837. + select MMC_SDHCI_PLTFM
  4838. help
  4839. This selects the Secure Digital Host Controller Interface (SDHCI)
  4840. found in the "Hollywood" chipset of the Nintendo Wii video game
  4841. @@ -144,8 +145,8 @@
  4842. config MMC_SDHCI_CNS3XXX
  4843. tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
  4844. - depends on ARCH_CNS3XXX
  4845. - depends on MMC_SDHCI_PLTFM
  4846. + depends on ARCH_CNS3XXX && HAS_DMA
  4847. + select MMC_SDHCI_PLTFM
  4848. help
  4849. This selects the SDHCI support for CNS3xxx System-on-Chip devices.
  4850. @@ -155,9 +156,9 @@
  4851. config MMC_SDHCI_ESDHC_IMX
  4852. tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
  4853. - depends on ARCH_MXC
  4854. - depends on MMC_SDHCI_PLTFM
  4855. + depends on ARCH_MXC && HAS_DMA
  4856. select MMC_SDHCI_IO_ACCESSORS
  4857. + select MMC_SDHCI_PLTFM
  4858. help
  4859. This selects the Freescale eSDHC/uSDHC controller support
  4860. found on i.MX25, i.MX35 i.MX5x and i.MX6x.
  4861. @@ -168,9 +169,9 @@
  4862. config MMC_SDHCI_DOVE
  4863. tristate "SDHCI support on Marvell's Dove SoC"
  4864. - depends on ARCH_DOVE
  4865. - depends on MMC_SDHCI_PLTFM
  4866. + depends on ARCH_DOVE && HAS_DMA
  4867. select MMC_SDHCI_IO_ACCESSORS
  4868. + select MMC_SDHCI_PLTFM
  4869. help
  4870. This selects the Secure Digital Host Controller Interface in
  4871. Marvell's Dove SoC.
  4872. @@ -181,9 +182,9 @@
  4873. config MMC_SDHCI_TEGRA
  4874. tristate "SDHCI platform support for the Tegra SD/MMC Controller"
  4875. - depends on ARCH_TEGRA
  4876. - depends on MMC_SDHCI_PLTFM
  4877. + depends on ARCH_TEGRA && HAS_DMA
  4878. select MMC_SDHCI_IO_ACCESSORS
  4879. + select MMC_SDHCI_PLTFM
  4880. help
  4881. This selects the Tegra SD/MMC controller. If you have a Tegra
  4882. platform with SD or MMC devices, say Y or M here.
  4883. @@ -192,7 +193,8 @@
  4884. config MMC_SDHCI_S3C
  4885. tristate "SDHCI support on Samsung S3C SoC"
  4886. - depends on MMC_SDHCI && PLAT_SAMSUNG
  4887. + depends on PLAT_SAMSUNG && HAS_DMA
  4888. + select MMC_SDHCI
  4889. help
  4890. This selects the Secure Digital Host Controller Interface (SDHCI)
  4891. often referrered to as the HSMMC block in some of the Samsung S3C
  4892. @@ -204,8 +206,8 @@
  4893. config MMC_SDHCI_SIRF
  4894. tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
  4895. - depends on ARCH_SIRF
  4896. - depends on MMC_SDHCI_PLTFM
  4897. + depends on ARCH_SIRF && HAS_DMA
  4898. + select MMC_SDHCI_PLTFM
  4899. help
  4900. This selects the SDHCI support for SiRF System-on-Chip devices.
  4901. @@ -215,8 +217,7 @@
  4902. config MMC_SDHCI_PXAV3
  4903. tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
  4904. - depends on CLKDEV_LOOKUP
  4905. - select MMC_SDHCI
  4906. + depends on CLKDEV_LOOKUP && HAS_DMA
  4907. select MMC_SDHCI_PLTFM
  4908. default CPU_MMP2
  4909. help
  4910. @@ -228,8 +229,7 @@
  4911. config MMC_SDHCI_PXAV2
  4912. tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
  4913. - depends on CLKDEV_LOOKUP
  4914. - select MMC_SDHCI
  4915. + depends on CLKDEV_LOOKUP && HAS_DMA
  4916. select MMC_SDHCI_PLTFM
  4917. default CPU_PXA910
  4918. help
  4919. @@ -241,7 +241,8 @@
  4920. config MMC_SDHCI_SPEAR
  4921. tristate "SDHCI support on ST SPEAr platform"
  4922. - depends on MMC_SDHCI && PLAT_SPEAR
  4923. + depends on PLAT_SPEAR && HAS_DMA
  4924. + select MMC_SDHCI
  4925. help
  4926. This selects the Secure Digital Host Controller Interface (SDHCI)
  4927. often referrered to as the HSMMC block in some of the ST SPEAR range
  4928. @@ -263,7 +264,7 @@
  4929. config MMC_SDHCI_BCM_KONA
  4930. tristate "SDHCI support on Broadcom KONA platform"
  4931. - depends on ARCH_BCM_MOBILE
  4932. + depends on ARCH_BCM_MOBILE && HAS_DMA
  4933. select MMC_SDHCI_PLTFM
  4934. help
  4935. This selects the Broadcom Kona Secure Digital Host Controller
  4936. @@ -274,9 +275,9 @@
  4937. config MMC_SDHCI_BCM2835
  4938. tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
  4939. - depends on ARCH_BCM2835
  4940. - depends on MMC_SDHCI_PLTFM
  4941. + depends on ARCH_BCM2835 && HAS_DMA
  4942. select MMC_SDHCI_IO_ACCESSORS
  4943. + select MMC_SDHCI_PLTFM
  4944. help
  4945. This selects the BCM2835 SD/MMC controller. If you have a BCM2835
  4946. platform with SD or MMC devices, say Y or M here.
  4947. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-acpi.c linux-3.15-rc6/drivers/mmc/host/sdhci-acpi.c
  4948. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-acpi.c 2014-05-21 23:42:02.000000000 +0200
  4949. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-acpi.c 2014-05-23 11:26:48.300940018 +0200
  4950. @@ -102,11 +102,19 @@
  4951. }
  4952. static const struct sdhci_ops sdhci_acpi_ops_dflt = {
  4953. + .set_clock = sdhci_set_clock,
  4954. .enable_dma = sdhci_acpi_enable_dma,
  4955. + .set_bus_width = sdhci_set_bus_width,
  4956. + .reset = sdhci_reset,
  4957. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  4958. };
  4959. static const struct sdhci_ops sdhci_acpi_ops_int = {
  4960. + .set_clock = sdhci_set_clock,
  4961. .enable_dma = sdhci_acpi_enable_dma,
  4962. + .set_bus_width = sdhci_set_bus_width,
  4963. + .reset = sdhci_reset,
  4964. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  4965. .hw_reset = sdhci_acpi_int_hw_reset,
  4966. };
  4967. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-bcm2835.c linux-3.15-rc6/drivers/mmc/host/sdhci-bcm2835.c
  4968. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-bcm2835.c 2014-05-21 23:42:02.000000000 +0200
  4969. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-bcm2835.c 2014-05-23 11:26:48.300940018 +0200
  4970. @@ -131,8 +131,12 @@
  4971. .read_l = bcm2835_sdhci_readl,
  4972. .read_w = bcm2835_sdhci_readw,
  4973. .read_b = bcm2835_sdhci_readb,
  4974. + .set_clock = sdhci_set_clock,
  4975. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  4976. .get_min_clock = bcm2835_sdhci_get_min_clock,
  4977. + .set_bus_width = sdhci_set_bus_width,
  4978. + .reset = sdhci_reset,
  4979. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  4980. };
  4981. static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
  4982. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-bcm-kona.c linux-3.15-rc6/drivers/mmc/host/sdhci-bcm-kona.c
  4983. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-bcm-kona.c 2014-05-21 23:42:02.000000000 +0200
  4984. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-bcm-kona.c 2014-05-23 11:26:48.300940018 +0200
  4985. @@ -206,9 +206,13 @@
  4986. }
  4987. static struct sdhci_ops sdhci_bcm_kona_ops = {
  4988. + .set_clock = sdhci_set_clock,
  4989. .get_max_clock = sdhci_bcm_kona_get_max_clk,
  4990. .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock,
  4991. .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks,
  4992. + .set_bus_width = sdhci_set_bus_width,
  4993. + .reset = sdhci_reset,
  4994. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  4995. .card_event = sdhci_bcm_kona_card_event,
  4996. };
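
The same conversion pattern recurs for every SDHCI glue driver touched by this patch:
.set_clock, .set_bus_width, .reset and .set_uhs_signaling become mandatory host
operations, and drivers without special requirements simply point them at the generic
helpers that the sdhci.c hunks below export. A hedged sketch with an invented driver
name:

#include "sdhci-pltfm.h"

/* Illustrative only: a trivial platform glue whose ops are just the exported
 * defaults (sdhci_set_clock, sdhci_set_bus_width, sdhci_reset and
 * sdhci_set_uhs_signaling, all EXPORT_SYMBOL_GPL'd in sdhci.c below).
 */
static const struct sdhci_ops foo_sdhci_ops = {
	.set_clock	   = sdhci_set_clock,
	.set_bus_width	   = sdhci_set_bus_width,
	.reset		   = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_pltfm_data foo_sdhci_pdata = {
	.ops = &foo_sdhci_ops,
};
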
  4997. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci.c linux-3.15-rc6/drivers/mmc/host/sdhci.c
  4998. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci.c 2014-05-21 23:42:02.000000000 +0200
  4999. +++ linux-3.15-rc6/drivers/mmc/host/sdhci.c 2014-05-23 11:26:48.304940032 +0200
  5000. @@ -44,6 +44,8 @@
  5001. #define MAX_TUNING_LOOP 40
  5002. +#define ADMA_SIZE ((128 * 2 + 1) * 4)
  5003. +
  5004. static unsigned int debug_quirks = 0;
  5005. static unsigned int debug_quirks2;
  5006. @@ -131,43 +133,26 @@
  5007. * *
  5008. \*****************************************************************************/
  5009. -static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
  5010. -{
  5011. - u32 ier;
  5012. -
  5013. - ier = sdhci_readl(host, SDHCI_INT_ENABLE);
  5014. - ier &= ~clear;
  5015. - ier |= set;
  5016. - sdhci_writel(host, ier, SDHCI_INT_ENABLE);
  5017. - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
  5018. -}
  5019. -
  5020. -static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
  5021. -{
  5022. - sdhci_clear_set_irqs(host, 0, irqs);
  5023. -}
  5024. -
  5025. -static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
  5026. -{
  5027. - sdhci_clear_set_irqs(host, irqs, 0);
  5028. -}
  5029. -
  5030. static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
  5031. {
  5032. - u32 present, irqs;
  5033. + u32 present;
  5034. if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
  5035. (host->mmc->caps & MMC_CAP_NONREMOVABLE))
  5036. return;
  5037. - present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5038. - SDHCI_CARD_PRESENT;
  5039. - irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
  5040. + if (enable) {
  5041. + present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5042. + SDHCI_CARD_PRESENT;
  5043. - if (enable)
  5044. - sdhci_unmask_irqs(host, irqs);
  5045. - else
  5046. - sdhci_mask_irqs(host, irqs);
  5047. + host->ier |= present ? SDHCI_INT_CARD_REMOVE :
  5048. + SDHCI_INT_CARD_INSERT;
  5049. + } else {
  5050. + host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
  5051. + }
  5052. +
  5053. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5054. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5055. }
  5056. static void sdhci_enable_card_detection(struct sdhci_host *host)
  5057. @@ -180,22 +165,9 @@
  5058. sdhci_set_card_detection(host, false);
  5059. }
  5060. -static void sdhci_reset(struct sdhci_host *host, u8 mask)
  5061. +void sdhci_reset(struct sdhci_host *host, u8 mask)
  5062. {
  5063. unsigned long timeout;
  5064. - u32 uninitialized_var(ier);
  5065. -
  5066. - if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
  5067. - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5068. - SDHCI_CARD_PRESENT))
  5069. - return;
  5070. - }
  5071. -
  5072. - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
  5073. - ier = sdhci_readl(host, SDHCI_INT_ENABLE);
  5074. -
  5075. - if (host->ops->platform_reset_enter)
  5076. - host->ops->platform_reset_enter(host, mask);
  5077. sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
  5078. @@ -220,16 +192,27 @@
  5079. timeout--;
  5080. mdelay(1);
  5081. }
  5082. +}
  5083. +EXPORT_SYMBOL_GPL(sdhci_reset);
  5084. +
  5085. +static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
  5086. +{
  5087. + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
  5088. + if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5089. + SDHCI_CARD_PRESENT))
  5090. + return;
  5091. + }
  5092. - if (host->ops->platform_reset_exit)
  5093. - host->ops->platform_reset_exit(host, mask);
  5094. + host->ops->reset(host, mask);
  5095. - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
  5096. - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
  5097. + if (mask & SDHCI_RESET_ALL) {
  5098. + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
  5099. + if (host->ops->enable_dma)
  5100. + host->ops->enable_dma(host);
  5101. + }
  5102. - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
  5103. - if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
  5104. - host->ops->enable_dma(host);
  5105. + /* Resetting the controller clears many settings, including preset enable */
  5106. + host->preset_enabled = false;
  5107. }
  5108. }
  5109. @@ -238,15 +221,18 @@
  5110. static void sdhci_init(struct sdhci_host *host, int soft)
  5111. {
  5112. if (soft)
  5113. - sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
  5114. + sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
  5115. else
  5116. - sdhci_reset(host, SDHCI_RESET_ALL);
  5117. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  5118. +
  5119. + host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
  5120. + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
  5121. + SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
  5122. + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
  5123. + SDHCI_INT_RESPONSE;
  5124. - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
  5125. - SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
  5126. - SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
  5127. - SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
  5128. - SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
  5129. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5130. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5131. if (soft) {
  5132. /* force clock reconfiguration */
  5133. @@ -502,11 +488,6 @@
  5134. else
  5135. direction = DMA_TO_DEVICE;
  5136. - /*
  5137. - * The ADMA descriptor table is mapped further down as we
  5138. - * need to fill it with data first.
  5139. - */
  5140. -
  5141. host->align_addr = dma_map_single(mmc_dev(host->mmc),
  5142. host->align_buffer, 128 * 4, direction);
  5143. if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
  5144. @@ -567,7 +548,7 @@
  5145. * If this triggers then we have a calculation bug
  5146. * somewhere. :/
  5147. */
  5148. - WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
  5149. + WARN_ON((desc - host->adma_desc) > ADMA_SIZE);
  5150. }
  5151. if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
  5152. @@ -595,17 +576,8 @@
  5153. host->align_addr, 128 * 4, direction);
  5154. }
  5155. - host->adma_addr = dma_map_single(mmc_dev(host->mmc),
  5156. - host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
  5157. - if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
  5158. - goto unmap_entries;
  5159. - BUG_ON(host->adma_addr & 0x3);
  5160. -
  5161. return 0;
  5162. -unmap_entries:
  5163. - dma_unmap_sg(mmc_dev(host->mmc), data->sg,
  5164. - data->sg_len, direction);
  5165. unmap_align:
  5166. dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
  5167. 128 * 4, direction);
  5168. @@ -623,19 +595,25 @@
  5169. u8 *align;
  5170. char *buffer;
  5171. unsigned long flags;
  5172. + bool has_unaligned;
  5173. if (data->flags & MMC_DATA_READ)
  5174. direction = DMA_FROM_DEVICE;
  5175. else
  5176. direction = DMA_TO_DEVICE;
  5177. - dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
  5178. - (128 * 2 + 1) * 4, DMA_TO_DEVICE);
  5179. -
  5180. dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
  5181. 128 * 4, direction);
  5182. - if (data->flags & MMC_DATA_READ) {
  5183. + /* Do a quick scan of the SG list for any unaligned mappings */
  5184. + has_unaligned = false;
  5185. + for_each_sg(data->sg, sg, host->sg_count, i)
  5186. + if (sg_dma_address(sg) & 3) {
  5187. + has_unaligned = true;
  5188. + break;
  5189. + }
  5190. +
  5191. + if (has_unaligned && data->flags & MMC_DATA_READ) {
  5192. dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
  5193. data->sg_len, direction);
  5194. @@ -721,9 +699,12 @@
  5195. u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
  5196. if (host->flags & SDHCI_REQ_USE_DMA)
  5197. - sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
  5198. + host->ier = (host->ier & ~pio_irqs) | dma_irqs;
  5199. else
  5200. - sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
  5201. + host->ier = (host->ier & ~dma_irqs) | pio_irqs;
  5202. +
  5203. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5204. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5205. }
  5206. static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
  5207. @@ -976,8 +957,8 @@
  5208. * upon error conditions.
  5209. */
  5210. if (data->error) {
  5211. - sdhci_reset(host, SDHCI_RESET_CMD);
  5212. - sdhci_reset(host, SDHCI_RESET_DATA);
  5213. + sdhci_do_reset(host, SDHCI_RESET_CMD);
  5214. + sdhci_do_reset(host, SDHCI_RESET_DATA);
  5215. }
  5216. sdhci_send_command(host, data->stop);
  5217. @@ -1107,24 +1088,23 @@
  5218. static u16 sdhci_get_preset_value(struct sdhci_host *host)
  5219. {
  5220. - u16 ctrl, preset = 0;
  5221. + u16 preset = 0;
  5222. - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5223. -
  5224. - switch (ctrl & SDHCI_CTRL_UHS_MASK) {
  5225. - case SDHCI_CTRL_UHS_SDR12:
  5226. + switch (host->timing) {
  5227. + case MMC_TIMING_UHS_SDR12:
  5228. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
  5229. break;
  5230. - case SDHCI_CTRL_UHS_SDR25:
  5231. + case MMC_TIMING_UHS_SDR25:
  5232. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
  5233. break;
  5234. - case SDHCI_CTRL_UHS_SDR50:
  5235. + case MMC_TIMING_UHS_SDR50:
  5236. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
  5237. break;
  5238. - case SDHCI_CTRL_UHS_SDR104:
  5239. + case MMC_TIMING_UHS_SDR104:
  5240. + case MMC_TIMING_MMC_HS200:
  5241. preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
  5242. break;
  5243. - case SDHCI_CTRL_UHS_DDR50:
  5244. + case MMC_TIMING_UHS_DDR50:
  5245. preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
  5246. break;
  5247. default:
  5248. @@ -1136,32 +1116,22 @@
  5249. return preset;
  5250. }
  5251. -static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
  5252. +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
  5253. {
  5254. int div = 0; /* Initialized for compiler warning */
  5255. int real_div = div, clk_mul = 1;
  5256. u16 clk = 0;
  5257. unsigned long timeout;
  5258. - if (clock && clock == host->clock)
  5259. - return;
  5260. -
  5261. host->mmc->actual_clock = 0;
  5262. - if (host->ops->set_clock) {
  5263. - host->ops->set_clock(host, clock);
  5264. - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
  5265. - return;
  5266. - }
  5267. -
  5268. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  5269. if (clock == 0)
  5270. - goto out;
  5271. + return;
  5272. if (host->version >= SDHCI_SPEC_300) {
  5273. - if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
  5274. - SDHCI_CTRL_PRESET_VAL_ENABLE) {
  5275. + if (host->preset_enabled) {
  5276. u16 pre_val;
  5277. clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
  5278. @@ -1247,26 +1217,16 @@
  5279. clk |= SDHCI_CLOCK_CARD_EN;
  5280. sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  5281. -
  5282. -out:
  5283. - host->clock = clock;
  5284. -}
  5285. -
  5286. -static inline void sdhci_update_clock(struct sdhci_host *host)
  5287. -{
  5288. - unsigned int clock;
  5289. -
  5290. - clock = host->clock;
  5291. - host->clock = 0;
  5292. - sdhci_set_clock(host, clock);
  5293. }
  5294. +EXPORT_SYMBOL_GPL(sdhci_set_clock);
  5295. -static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
  5296. +static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
  5297. + unsigned short vdd)
  5298. {
  5299. u8 pwr = 0;
  5300. - if (power != (unsigned short)-1) {
  5301. - switch (1 << power) {
  5302. + if (mode != MMC_POWER_OFF) {
  5303. + switch (1 << vdd) {
  5304. case MMC_VDD_165_195:
  5305. pwr = SDHCI_POWER_180;
  5306. break;
  5307. @@ -1284,7 +1244,7 @@
  5308. }
  5309. if (host->pwr == pwr)
  5310. - return -1;
  5311. + return;
  5312. host->pwr = pwr;
  5313. @@ -1292,38 +1252,43 @@
  5314. sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
  5315. if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  5316. sdhci_runtime_pm_bus_off(host);
  5317. - return 0;
  5318. - }
  5319. -
  5320. - /*
  5321. - * Spec says that we should clear the power reg before setting
  5322. - * a new value. Some controllers don't seem to like this though.
  5323. - */
  5324. - if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
  5325. - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
  5326. + vdd = 0;
  5327. + } else {
  5328. + /*
  5329. + * Spec says that we should clear the power reg before setting
  5330. + * a new value. Some controllers don't seem to like this though.
  5331. + */
  5332. + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
  5333. + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
  5334. - /*
  5335. - * At least the Marvell CaFe chip gets confused if we set the voltage
  5336. - * and set turn on power at the same time, so set the voltage first.
  5337. - */
  5338. - if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
  5339. - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5340. + /*
  5341. + * At least the Marvell CaFe chip gets confused if we set the
  5342. + * voltage and set turn on power at the same time, so set the
  5343. + * voltage first.
  5344. + */
  5345. + if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
  5346. + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5347. - pwr |= SDHCI_POWER_ON;
  5348. + pwr |= SDHCI_POWER_ON;
  5349. - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5350. + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
  5351. - if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  5352. - sdhci_runtime_pm_bus_on(host);
  5353. + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
  5354. + sdhci_runtime_pm_bus_on(host);
  5355. - /*
  5356. - * Some controllers need an extra 10ms delay of 10ms before they
  5357. - * can apply clock after applying power
  5358. - */
  5359. - if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
  5360. - mdelay(10);
  5361. + /*
  5362. + * Some controllers need an extra 10ms delay before they
  5363. + * can apply clock after applying power
  5364. + */
  5365. + if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
  5366. + mdelay(10);
  5367. + }
  5368. - return power;
  5369. + if (host->vmmc) {
  5370. + spin_unlock_irq(&host->lock);
  5371. + mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd);
  5372. + spin_lock_irq(&host->lock);
  5373. + }
  5374. }
  5375. /*****************************************************************************\
  5376. @@ -1427,10 +1392,52 @@
  5377. spin_unlock_irqrestore(&host->lock, flags);
  5378. }
  5379. +void sdhci_set_bus_width(struct sdhci_host *host, int width)
  5380. +{
  5381. + u8 ctrl;
  5382. +
  5383. + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
  5384. + if (width == MMC_BUS_WIDTH_8) {
  5385. + ctrl &= ~SDHCI_CTRL_4BITBUS;
  5386. + if (host->version >= SDHCI_SPEC_300)
  5387. + ctrl |= SDHCI_CTRL_8BITBUS;
  5388. + } else {
  5389. + if (host->version >= SDHCI_SPEC_300)
  5390. + ctrl &= ~SDHCI_CTRL_8BITBUS;
  5391. + if (width == MMC_BUS_WIDTH_4)
  5392. + ctrl |= SDHCI_CTRL_4BITBUS;
  5393. + else
  5394. + ctrl &= ~SDHCI_CTRL_4BITBUS;
  5395. + }
  5396. + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5397. +}
  5398. +EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
  5399. +
  5400. +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
  5401. +{
  5402. + u16 ctrl_2;
  5403. +
  5404. + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5405. + /* Select Bus Speed Mode for host */
  5406. + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
  5407. + if ((timing == MMC_TIMING_MMC_HS200) ||
  5408. + (timing == MMC_TIMING_UHS_SDR104))
  5409. + ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
  5410. + else if (timing == MMC_TIMING_UHS_SDR12)
  5411. + ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
  5412. + else if (timing == MMC_TIMING_UHS_SDR25)
  5413. + ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
  5414. + else if (timing == MMC_TIMING_UHS_SDR50)
  5415. + ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
  5416. + else if (timing == MMC_TIMING_UHS_DDR50)
  5417. + ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
  5418. + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
  5419. +}
  5420. +EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
  5421. +
  5422. static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
  5423. {
  5424. unsigned long flags;
  5425. - int vdd_bit = -1;
  5426. u8 ctrl;
  5427. spin_lock_irqsave(&host->lock, flags);
  5428. @@ -1456,45 +1463,17 @@
  5429. !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
  5430. sdhci_enable_preset_value(host, false);
  5431. - sdhci_set_clock(host, ios->clock);
  5432. -
  5433. - if (ios->power_mode == MMC_POWER_OFF)
  5434. - vdd_bit = sdhci_set_power(host, -1);
  5435. - else
  5436. - vdd_bit = sdhci_set_power(host, ios->vdd);
  5437. -
  5438. - if (host->vmmc && vdd_bit != -1) {
  5439. - spin_unlock_irqrestore(&host->lock, flags);
  5440. - mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
  5441. - spin_lock_irqsave(&host->lock, flags);
  5442. + if (!ios->clock || ios->clock != host->clock) {
  5443. + host->ops->set_clock(host, ios->clock);
  5444. + host->clock = ios->clock;
  5445. }
  5446. + sdhci_set_power(host, ios->power_mode, ios->vdd);
  5447. +
  5448. if (host->ops->platform_send_init_74_clocks)
  5449. host->ops->platform_send_init_74_clocks(host, ios->power_mode);
  5450. - /*
  5451. - * If your platform has 8-bit width support but is not a v3 controller,
  5452. - * or if it requires special setup code, you should implement that in
  5453. - * platform_bus_width().
  5454. - */
  5455. - if (host->ops->platform_bus_width) {
  5456. - host->ops->platform_bus_width(host, ios->bus_width);
  5457. - } else {
  5458. - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
  5459. - if (ios->bus_width == MMC_BUS_WIDTH_8) {
  5460. - ctrl &= ~SDHCI_CTRL_4BITBUS;
  5461. - if (host->version >= SDHCI_SPEC_300)
  5462. - ctrl |= SDHCI_CTRL_8BITBUS;
  5463. - } else {
  5464. - if (host->version >= SDHCI_SPEC_300)
  5465. - ctrl &= ~SDHCI_CTRL_8BITBUS;
  5466. - if (ios->bus_width == MMC_BUS_WIDTH_4)
  5467. - ctrl |= SDHCI_CTRL_4BITBUS;
  5468. - else
  5469. - ctrl &= ~SDHCI_CTRL_4BITBUS;
  5470. - }
  5471. - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5472. - }
  5473. + host->ops->set_bus_width(host, ios->bus_width);
  5474. ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
  5475. @@ -1516,13 +1495,13 @@
  5476. (ios->timing == MMC_TIMING_UHS_SDR25))
  5477. ctrl |= SDHCI_CTRL_HISPD;
  5478. - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5479. - if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
  5480. + if (!host->preset_enabled) {
  5481. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5482. /*
  5483. * We only need to set Driver Strength if the
  5484. * preset value enable is not set.
  5485. */
  5486. + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5487. ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
  5488. if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
  5489. ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
  5490. @@ -1546,34 +1525,11 @@
  5491. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5492. /* Re-enable SD Clock */
  5493. - sdhci_update_clock(host);
  5494. + host->ops->set_clock(host, host->clock);
  5495. }
  5496. -
  5497. - /* Reset SD Clock Enable */
  5498. - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
  5499. - clk &= ~SDHCI_CLOCK_CARD_EN;
  5500. - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  5501. -
  5502. - if (host->ops->set_uhs_signaling)
  5503. - host->ops->set_uhs_signaling(host, ios->timing);
  5504. - else {
  5505. - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5506. - /* Select Bus Speed Mode for host */
  5507. - ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
  5508. - if ((ios->timing == MMC_TIMING_MMC_HS200) ||
  5509. - (ios->timing == MMC_TIMING_UHS_SDR104))
  5510. - ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
  5511. - else if (ios->timing == MMC_TIMING_UHS_SDR12)
  5512. - ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
  5513. - else if (ios->timing == MMC_TIMING_UHS_SDR25)
  5514. - ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
  5515. - else if (ios->timing == MMC_TIMING_UHS_SDR50)
  5516. - ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
  5517. - else if (ios->timing == MMC_TIMING_UHS_DDR50)
  5518. - ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
  5519. - sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
  5520. - }
  5521. + host->ops->set_uhs_signaling(host, ios->timing);
  5522. + host->timing = ios->timing;
  5523. if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
  5524. ((ios->timing == MMC_TIMING_UHS_SDR12) ||
  5525. @@ -1588,9 +1544,6 @@
  5526. ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
  5527. >> SDHCI_PRESET_DRV_SHIFT;
  5528. }
  5529. -
  5530. - /* Re-enable SD Clock */
  5531. - sdhci_update_clock(host);
  5532. } else
  5533. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  5534. @@ -1600,7 +1553,7 @@
  5535. * it on each ios seems to solve the problem.
  5536. */
  5537. if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
  5538. - sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  5539. + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
  5540. mmiowb();
  5541. spin_unlock_irqrestore(&host->lock, flags);
  5542. @@ -1709,24 +1662,16 @@
  5543. static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
  5544. {
  5545. - if (host->flags & SDHCI_DEVICE_DEAD)
  5546. - goto out;
  5547. -
  5548. - if (enable)
  5549. - host->flags |= SDHCI_SDIO_IRQ_ENABLED;
  5550. - else
  5551. - host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
  5552. -
  5553. - /* SDIO IRQ will be enabled as appropriate in runtime resume */
  5554. - if (host->runtime_suspended)
  5555. - goto out;
  5556. + if (!(host->flags & SDHCI_DEVICE_DEAD)) {
  5557. + if (enable)
  5558. + host->ier |= SDHCI_INT_CARD_INT;
  5559. + else
  5560. + host->ier &= ~SDHCI_INT_CARD_INT;
  5561. - if (enable)
  5562. - sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
  5563. - else
  5564. - sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
  5565. -out:
  5566. - mmiowb();
  5567. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5568. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5569. + mmiowb();
  5570. + }
  5571. }
  5572. static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
  5573. @@ -1734,9 +1679,18 @@
  5574. struct sdhci_host *host = mmc_priv(mmc);
  5575. unsigned long flags;
  5576. + sdhci_runtime_pm_get(host);
  5577. +
  5578. spin_lock_irqsave(&host->lock, flags);
  5579. + if (enable)
  5580. + host->flags |= SDHCI_SDIO_IRQ_ENABLED;
  5581. + else
  5582. + host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
  5583. +
  5584. sdhci_enable_sdio_irq_nolock(host, enable);
  5585. spin_unlock_irqrestore(&host->lock, flags);
  5586. +
  5587. + sdhci_runtime_pm_put(host);
  5588. }
  5589. static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
  5590. @@ -1798,9 +1752,6 @@
  5591. ctrl |= SDHCI_CTRL_VDD_180;
  5592. sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5593. - /* Wait for 5ms */
  5594. - usleep_range(5000, 5500);
  5595. -
  5596. /* 1.8V regulator output should be stable within 5 ms */
  5597. ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5598. if (ctrl & SDHCI_CTRL_VDD_180)
  5599. @@ -1855,22 +1806,16 @@
  5600. static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
  5601. {
  5602. - struct sdhci_host *host;
  5603. + struct sdhci_host *host = mmc_priv(mmc);
  5604. u16 ctrl;
  5605. - u32 ier;
  5606. int tuning_loop_counter = MAX_TUNING_LOOP;
  5607. unsigned long timeout;
  5608. int err = 0;
  5609. - bool requires_tuning_nonuhs = false;
  5610. unsigned long flags;
  5611. - host = mmc_priv(mmc);
  5612. -
  5613. sdhci_runtime_pm_get(host);
  5614. spin_lock_irqsave(&host->lock, flags);
  5615. - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5616. -
  5617. /*
  5618. * The Host Controller needs tuning only in case of SDR104 mode
  5619. * and for SDR50 mode when Use Tuning for SDR50 is set in the
  5620. @@ -1878,15 +1823,18 @@
  5621. * If the Host Controller supports the HS200 mode then the
  5622. * tuning function has to be executed.
  5623. */
  5624. - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
  5625. - (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
  5626. - host->flags & SDHCI_SDR104_NEEDS_TUNING))
  5627. - requires_tuning_nonuhs = true;
  5628. -
  5629. - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
  5630. - requires_tuning_nonuhs)
  5631. - ctrl |= SDHCI_CTRL_EXEC_TUNING;
  5632. - else {
  5633. + switch (host->timing) {
  5634. + case MMC_TIMING_MMC_HS200:
  5635. + case MMC_TIMING_UHS_SDR104:
  5636. + break;
  5637. +
  5638. + case MMC_TIMING_UHS_SDR50:
  5639. + if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
  5640. + host->flags & SDHCI_SDR104_NEEDS_TUNING)
  5641. + break;
  5642. + /* FALLTHROUGH */
  5643. +
  5644. + default:
  5645. spin_unlock_irqrestore(&host->lock, flags);
  5646. sdhci_runtime_pm_put(host);
  5647. return 0;
  5648. @@ -1899,6 +1847,8 @@
  5649. return err;
  5650. }
  5651. + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5652. + ctrl |= SDHCI_CTRL_EXEC_TUNING;
  5653. sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5654. /*
  5655. @@ -1911,8 +1861,8 @@
  5656. * to make sure we don't hit a controller bug, we _only_
  5657. * enable Buffer Read Ready interrupt here.
  5658. */
  5659. - ier = sdhci_readl(host, SDHCI_INT_ENABLE);
  5660. - sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
  5661. + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
  5662. + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
  5663. /*
  5664. * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
  5665. @@ -2044,7 +1994,8 @@
  5666. if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
  5667. err = 0;
  5668. - sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
  5669. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5670. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5671. spin_unlock_irqrestore(&host->lock, flags);
  5672. sdhci_runtime_pm_put(host);
  5673. @@ -2054,26 +2005,30 @@
  5674. static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
  5675. {
  5676. - u16 ctrl;
  5677. -
  5678. /* Host Controller v3.00 defines preset value registers */
  5679. if (host->version < SDHCI_SPEC_300)
  5680. return;
  5681. - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5682. -
  5683. /*
  5684. * We only enable or disable Preset Value if they are not already
  5685. * enabled or disabled respectively. Otherwise, we bail out.
  5686. */
  5687. - if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
  5688. - ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
  5689. - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5690. - host->flags |= SDHCI_PV_ENABLED;
  5691. - } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
  5692. - ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
  5693. + if (host->preset_enabled != enable) {
  5694. + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
  5695. +
  5696. + if (enable)
  5697. + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
  5698. + else
  5699. + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
  5700. +
  5701. sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
  5702. - host->flags &= ~SDHCI_PV_ENABLED;
  5703. +
  5704. + if (enable)
  5705. + host->flags |= SDHCI_PV_ENABLED;
  5706. + else
  5707. + host->flags &= ~SDHCI_PV_ENABLED;
  5708. +
  5709. + host->preset_enabled = enable;
  5710. }
  5711. }
  5712. @@ -2095,8 +2050,8 @@
  5713. pr_err("%s: Resetting controller.\n",
  5714. mmc_hostname(host->mmc));
  5715. - sdhci_reset(host, SDHCI_RESET_CMD);
  5716. - sdhci_reset(host, SDHCI_RESET_DATA);
  5717. + sdhci_do_reset(host, SDHCI_RESET_CMD);
  5718. + sdhci_do_reset(host, SDHCI_RESET_DATA);
  5719. host->mrq->cmd->error = -ENOMEDIUM;
  5720. tasklet_schedule(&host->finish_tasklet);
  5721. @@ -2124,15 +2079,6 @@
  5722. * *
  5723. \*****************************************************************************/
  5724. -static void sdhci_tasklet_card(unsigned long param)
  5725. -{
  5726. - struct sdhci_host *host = (struct sdhci_host*)param;
  5727. -
  5728. - sdhci_card_event(host->mmc);
  5729. -
  5730. - mmc_detect_change(host->mmc, msecs_to_jiffies(200));
  5731. -}
  5732. -
  5733. static void sdhci_tasklet_finish(unsigned long param)
  5734. {
  5735. struct sdhci_host *host;
  5736. @@ -2169,12 +2115,12 @@
  5737. /* Some controllers need this kick or reset won't work here */
  5738. if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
  5739. /* This is to force an update */
  5740. - sdhci_update_clock(host);
  5741. + host->ops->set_clock(host, host->clock);
  5742. /* Spec says we should do both at the same time, but Ricoh
  5743. controllers do not like that. */
  5744. - sdhci_reset(host, SDHCI_RESET_CMD);
  5745. - sdhci_reset(host, SDHCI_RESET_DATA);
  5746. + sdhci_do_reset(host, SDHCI_RESET_CMD);
  5747. + sdhci_do_reset(host, SDHCI_RESET_DATA);
  5748. }
  5749. host->mrq = NULL;
  5750. @@ -2424,101 +2370,94 @@
  5751. static irqreturn_t sdhci_irq(int irq, void *dev_id)
  5752. {
  5753. - irqreturn_t result;
  5754. + irqreturn_t result = IRQ_NONE;
  5755. struct sdhci_host *host = dev_id;
  5756. - u32 intmask, unexpected = 0;
  5757. - int cardint = 0, max_loops = 16;
  5758. + u32 intmask, mask, unexpected = 0;
  5759. + int max_loops = 16;
  5760. spin_lock(&host->lock);
  5761. - if (host->runtime_suspended) {
  5762. + if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
  5763. spin_unlock(&host->lock);
  5764. return IRQ_NONE;
  5765. }
  5766. intmask = sdhci_readl(host, SDHCI_INT_STATUS);
  5767. -
  5768. if (!intmask || intmask == 0xffffffff) {
  5769. result = IRQ_NONE;
  5770. goto out;
  5771. }
  5772. -again:
  5773. - DBG("*** %s got interrupt: 0x%08x\n",
  5774. - mmc_hostname(host->mmc), intmask);
  5775. -
  5776. - if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  5777. - u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5778. - SDHCI_CARD_PRESENT;
  5779. -
  5780. - /*
  5781. - * There is a observation on i.mx esdhc. INSERT bit will be
  5782. - * immediately set again when it gets cleared, if a card is
  5783. - * inserted. We have to mask the irq to prevent interrupt
  5784. - * storm which will freeze the system. And the REMOVE gets
  5785. - * the same situation.
  5786. - *
  5787. - * More testing are needed here to ensure it works for other
  5788. - * platforms though.
  5789. - */
  5790. - sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
  5791. - SDHCI_INT_CARD_REMOVE);
  5792. - sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
  5793. - SDHCI_INT_CARD_INSERT);
  5794. -
  5795. - sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
  5796. - SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
  5797. - intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
  5798. - tasklet_schedule(&host->card_tasklet);
  5799. - }
  5800. -
  5801. - if (intmask & SDHCI_INT_CMD_MASK) {
  5802. - sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
  5803. - SDHCI_INT_STATUS);
  5804. - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
  5805. - }
  5806. + do {
  5807. + /* Clear selected interrupts. */
  5808. + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
  5809. + SDHCI_INT_BUS_POWER);
  5810. + sdhci_writel(host, mask, SDHCI_INT_STATUS);
  5811. +
  5812. + DBG("*** %s got interrupt: 0x%08x\n",
  5813. + mmc_hostname(host->mmc), intmask);
  5814. +
  5815. + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  5816. + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
  5817. + SDHCI_CARD_PRESENT;
  5818. - if (intmask & SDHCI_INT_DATA_MASK) {
  5819. - sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
  5820. - SDHCI_INT_STATUS);
  5821. - sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
  5822. - }
  5823. + /*
  4824. + * There is an observation on i.mx esdhc. INSERT
  5825. + * bit will be immediately set again when it gets
  5826. + * cleared, if a card is inserted. We have to mask
  5827. + * the irq to prevent interrupt storm which will
  5828. + * freeze the system. And the REMOVE gets the
  5829. + * same situation.
  5830. + *
  4831. + * More testing is needed here to ensure it works
  5832. + * for other platforms though.
  5833. + */
  5834. + host->ier &= ~(SDHCI_INT_CARD_INSERT |
  5835. + SDHCI_INT_CARD_REMOVE);
  5836. + host->ier |= present ? SDHCI_INT_CARD_REMOVE :
  5837. + SDHCI_INT_CARD_INSERT;
  5838. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5839. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5840. - intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
  5841. + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
  5842. + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
  5843. - intmask &= ~SDHCI_INT_ERROR;
  5844. + host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
  5845. + SDHCI_INT_CARD_REMOVE);
  5846. + result = IRQ_WAKE_THREAD;
  5847. + }
  5848. - if (intmask & SDHCI_INT_BUS_POWER) {
  5849. - pr_err("%s: Card is consuming too much power!\n",
  5850. - mmc_hostname(host->mmc));
  5851. - sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
  5852. - }
  5853. + if (intmask & SDHCI_INT_CMD_MASK)
  5854. + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
  5855. - intmask &= ~SDHCI_INT_BUS_POWER;
  5856. + if (intmask & SDHCI_INT_DATA_MASK)
  5857. + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
  5858. - if (intmask & SDHCI_INT_CARD_INT)
  5859. - cardint = 1;
  5860. + if (intmask & SDHCI_INT_BUS_POWER)
  5861. + pr_err("%s: Card is consuming too much power!\n",
  5862. + mmc_hostname(host->mmc));
  5863. - intmask &= ~SDHCI_INT_CARD_INT;
  5864. + if (intmask & SDHCI_INT_CARD_INT) {
  5865. + sdhci_enable_sdio_irq_nolock(host, false);
  5866. + host->thread_isr |= SDHCI_INT_CARD_INT;
  5867. + result = IRQ_WAKE_THREAD;
  5868. + }
  5869. - if (intmask) {
  5870. - unexpected |= intmask;
  5871. - sdhci_writel(host, intmask, SDHCI_INT_STATUS);
  5872. - }
  5873. + intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
  5874. + SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
  5875. + SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
  5876. + SDHCI_INT_CARD_INT);
  5877. - result = IRQ_HANDLED;
  5878. + if (intmask) {
  5879. + unexpected |= intmask;
  5880. + sdhci_writel(host, intmask, SDHCI_INT_STATUS);
  5881. + }
  5882. - intmask = sdhci_readl(host, SDHCI_INT_STATUS);
  5883. + if (result == IRQ_NONE)
  5884. + result = IRQ_HANDLED;
  5885. - /*
  5886. - * If we know we'll call the driver to signal SDIO IRQ, disregard
  5887. - * further indications of Card Interrupt in the status to avoid a
  5888. - * needless loop.
  5889. - */
  5890. - if (cardint)
  5891. - intmask &= ~SDHCI_INT_CARD_INT;
  5892. - if (intmask && --max_loops)
  5893. - goto again;
  5894. + intmask = sdhci_readl(host, SDHCI_INT_STATUS);
  5895. + } while (intmask && --max_loops);
  5896. out:
  5897. spin_unlock(&host->lock);
  5898. @@ -2527,15 +2466,38 @@
  5899. mmc_hostname(host->mmc), unexpected);
  5900. sdhci_dumpregs(host);
  5901. }
  5902. - /*
  5903. - * We have to delay this as it calls back into the driver.
  5904. - */
  5905. - if (cardint)
  5906. - mmc_signal_sdio_irq(host->mmc);
  5907. return result;
  5908. }
  5909. +static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
  5910. +{
  5911. + struct sdhci_host *host = dev_id;
  5912. + unsigned long flags;
  5913. + u32 isr;
  5914. +
  5915. + spin_lock_irqsave(&host->lock, flags);
  5916. + isr = host->thread_isr;
  5917. + host->thread_isr = 0;
  5918. + spin_unlock_irqrestore(&host->lock, flags);
  5919. +
  5920. + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
  5921. + sdhci_card_event(host->mmc);
  5922. + mmc_detect_change(host->mmc, msecs_to_jiffies(200));
  5923. + }
  5924. +
  5925. + if (isr & SDHCI_INT_CARD_INT) {
  5926. + sdio_run_irqs(host->mmc);
  5927. +
  5928. + spin_lock_irqsave(&host->lock, flags);
  5929. + if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
  5930. + sdhci_enable_sdio_irq_nolock(host, true);
  5931. + spin_unlock_irqrestore(&host->lock, flags);
  5932. + }
  5933. +
  5934. + return isr ? IRQ_HANDLED : IRQ_NONE;
  5935. +}
  5936. +
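The interrupt rework above converts SDHCI from tasklet/cardint deferral to a
hard/threaded pair: sdhci_irq() acks and collects events into host->thread_isr and
returns IRQ_WAKE_THREAD, while sdhci_thread_irq() does the sleepable work (card
detect, SDIO callbacks). A minimal sketch of the same split, with made-up names and
the locking around "pending" omitted for brevity:

	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/printk.h>

	/* Hypothetical device, used only for illustration. */
	struct demo_host {
		void __iomem *base;
		u32 pending;			/* events handed to the thread */
	};
	#define DEMO_INT_STATUS	0x30		/* assumed register offset */

	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		struct demo_host *host = dev_id;
		u32 status = readl(host->base + DEMO_INT_STATUS);

		if (!status)
			return IRQ_NONE;	/* shared line, not ours */

		writel(status, host->base + DEMO_INT_STATUS);	/* ack while atomic */
		host->pending |= status;	/* defer the slow work */
		return IRQ_WAKE_THREAD;		/* runs demo_threadfn() next */
	}

	static irqreturn_t demo_threadfn(int irq, void *dev_id)
	{
		struct demo_host *host = dev_id;

		/* Process context: sleeping calls (card detect etc.) go here. */
		pr_info("demo: handling events 0x%08x\n", host->pending);
		host->pending = 0;
		return IRQ_HANDLED;
	}

	/* Registered just like the patch registers sdhci_irq/sdhci_thread_irq:
	 * request_threaded_irq(irq, demo_hardirq, demo_threadfn, IRQF_SHARED,
	 *			"demo", host);
	 */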
  5937. /*****************************************************************************\
  5938. * *
  5939. * Suspend/resume *
  5940. @@ -2572,9 +2534,6 @@
  5941. int sdhci_suspend_host(struct sdhci_host *host)
  5942. {
  5943. - if (host->ops->platform_suspend)
  5944. - host->ops->platform_suspend(host);
  5945. -
  5946. sdhci_disable_card_detection(host);
  5947. /* Disable tuning since we are suspending */
  5948. @@ -2584,7 +2543,9 @@
  5949. }
  5950. if (!device_may_wakeup(mmc_dev(host->mmc))) {
  5951. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  5952. + host->ier = 0;
  5953. + sdhci_writel(host, 0, SDHCI_INT_ENABLE);
  5954. + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
  5955. free_irq(host->irq, host);
  5956. } else {
  5957. sdhci_enable_irq_wakeups(host);
  5958. @@ -2605,8 +2566,9 @@
  5959. }
  5960. if (!device_may_wakeup(mmc_dev(host->mmc))) {
  5961. - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
  5962. - mmc_hostname(host->mmc), host);
  5963. + ret = request_threaded_irq(host->irq, sdhci_irq,
  5964. + sdhci_thread_irq, IRQF_SHARED,
  5965. + mmc_hostname(host->mmc), host);
  5966. if (ret)
  5967. return ret;
  5968. } else {
  5969. @@ -2628,9 +2590,6 @@
  5970. sdhci_enable_card_detection(host);
  5971. - if (host->ops->platform_resume)
  5972. - host->ops->platform_resume(host);
  5973. -
  5974. /* Set the re-tuning expiration flag */
  5975. if (host->flags & SDHCI_USING_RETUNING_TIMER)
  5976. host->flags |= SDHCI_NEEDS_RETUNING;
  5977. @@ -2682,10 +2641,12 @@
  5978. }
  5979. spin_lock_irqsave(&host->lock, flags);
  5980. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  5981. + host->ier &= SDHCI_INT_CARD_INT;
  5982. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  5983. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  5984. spin_unlock_irqrestore(&host->lock, flags);
  5985. - synchronize_irq(host->irq);
  5986. + synchronize_hardirq(host->irq);
  5987. spin_lock_irqsave(&host->lock, flags);
  5988. host->runtime_suspended = true;
  5989. @@ -2729,7 +2690,7 @@
  5990. host->runtime_suspended = false;
  5991. /* Enable SDIO IRQ */
  5992. - if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
  5993. + if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
  5994. sdhci_enable_sdio_irq_nolock(host, true);
  5995. /* Enable Card Detection */
  5996. @@ -2788,7 +2749,7 @@
  5997. if (debug_quirks2)
  5998. host->quirks2 = debug_quirks2;
  5999. - sdhci_reset(host, SDHCI_RESET_ALL);
  6000. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  6001. host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
  6002. host->version = (host->version & SDHCI_SPEC_VER_MASK)
  6003. @@ -2848,15 +2809,29 @@
  6004. * (128) and potentially one alignment transfer for
  6005. * each of those entries.
  6006. */
  6007. - host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
  6008. + host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc),
  6009. + ADMA_SIZE, &host->adma_addr,
  6010. + GFP_KERNEL);
  6011. host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
  6012. if (!host->adma_desc || !host->align_buffer) {
  6013. - kfree(host->adma_desc);
  6014. + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
  6015. + host->adma_desc, host->adma_addr);
  6016. kfree(host->align_buffer);
  6017. pr_warning("%s: Unable to allocate ADMA "
  6018. "buffers. Falling back to standard DMA.\n",
  6019. mmc_hostname(mmc));
  6020. host->flags &= ~SDHCI_USE_ADMA;
  6021. + host->adma_desc = NULL;
  6022. + host->align_buffer = NULL;
  6023. + } else if (host->adma_addr & 3) {
  6024. + pr_warning("%s: unable to allocate aligned ADMA descriptor\n",
  6025. + mmc_hostname(mmc));
  6026. + host->flags &= ~SDHCI_USE_ADMA;
  6027. + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
  6028. + host->adma_desc, host->adma_addr);
  6029. + kfree(host->align_buffer);
  6030. + host->adma_desc = NULL;
  6031. + host->align_buffer = NULL;
  6032. }
  6033. }
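The ADMA descriptor table is now allocated with dma_alloc_coherent(), so the device
sees a stable DMA address (host->adma_addr) for the lifetime of the host. Both error
branches above follow one pattern: allocate, verify the 32-bit alignment ADMA needs,
otherwise free and fall back to standard DMA. Condensed into a sketch (the local
variable names here are illustrative, not the driver's):

	desc = dma_alloc_coherent(dev, ADMA_SIZE, &desc_dma, GFP_KERNEL);
	if (!desc || (desc_dma & 3)) {
		/* allocation failed, or the table is not 32-bit aligned:
		 * release whatever we got and fall back to standard DMA */
		if (desc)
			dma_free_coherent(dev, ADMA_SIZE, desc, desc_dma);
		desc = NULL;
		use_adma = false;
	}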
  6034. @@ -2941,6 +2916,7 @@
  6035. mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;
  6036. mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
  6037. + mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
  6038. if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
  6039. host->flags |= SDHCI_AUTO_CMD12;
  6040. @@ -3212,8 +3188,6 @@
  6041. /*
  6042. * Init tasklets.
  6043. */
  6044. - tasklet_init(&host->card_tasklet,
  6045. - sdhci_tasklet_card, (unsigned long)host);
  6046. tasklet_init(&host->finish_tasklet,
  6047. sdhci_tasklet_finish, (unsigned long)host);
  6048. @@ -3230,8 +3204,8 @@
  6049. sdhci_init(host, 0);
  6050. - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
  6051. - mmc_hostname(mmc), host);
  6052. + ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
  6053. + IRQF_SHARED, mmc_hostname(mmc), host);
  6054. if (ret) {
  6055. pr_err("%s: Failed to request IRQ %d: %d\n",
  6056. mmc_hostname(mmc), host->irq, ret);
  6057. @@ -3273,12 +3247,12 @@
  6058. #ifdef SDHCI_USE_LEDS_CLASS
  6059. reset:
  6060. - sdhci_reset(host, SDHCI_RESET_ALL);
  6061. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  6062. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  6063. + sdhci_writel(host, 0, SDHCI_INT_ENABLE);
  6064. + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
  6065. free_irq(host->irq, host);
  6066. #endif
  6067. untasklet:
  6068. - tasklet_kill(&host->card_tasklet);
  6069. tasklet_kill(&host->finish_tasklet);
  6070. return ret;
  6071. @@ -3315,14 +3289,14 @@
  6072. #endif
  6073. if (!dead)
  6074. - sdhci_reset(host, SDHCI_RESET_ALL);
  6075. + sdhci_do_reset(host, SDHCI_RESET_ALL);
  6076. - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
  6077. + sdhci_writel(host, 0, SDHCI_INT_ENABLE);
  6078. + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
  6079. free_irq(host->irq, host);
  6080. del_timer_sync(&host->timer);
  6081. - tasklet_kill(&host->card_tasklet);
  6082. tasklet_kill(&host->finish_tasklet);
  6083. if (host->vmmc) {
  6084. @@ -3335,7 +3309,9 @@
  6085. regulator_put(host->vqmmc);
  6086. }
  6087. - kfree(host->adma_desc);
  6088. + if (host->adma_desc)
  6089. + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE,
  6090. + host->adma_desc, host->adma_addr);
  6091. kfree(host->align_buffer);
  6092. host->adma_desc = NULL;
  6093. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-cns3xxx.c linux-3.15-rc6/drivers/mmc/host/sdhci-cns3xxx.c
  6094. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-cns3xxx.c 2014-05-21 23:42:02.000000000 +0200
  6095. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-cns3xxx.c 2014-05-23 11:26:48.304940032 +0200
  6096. @@ -30,13 +30,12 @@
  6097. u16 clk;
  6098. unsigned long timeout;
  6099. - if (clock == host->clock)
  6100. - return;
  6101. + host->mmc->actual_clock = 0;
  6102. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  6103. if (clock == 0)
  6104. - goto out;
  6105. + return;
  6106. while (host->max_clk / div > clock) {
  6107. /*
  6108. @@ -75,13 +74,14 @@
  6109. clk |= SDHCI_CLOCK_CARD_EN;
  6110. sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  6111. -out:
  6112. - host->clock = clock;
  6113. }
  6114. static const struct sdhci_ops sdhci_cns3xxx_ops = {
  6115. .get_max_clock = sdhci_cns3xxx_get_max_clk,
  6116. .set_clock = sdhci_cns3xxx_set_clock,
  6117. + .set_bus_width = sdhci_set_bus_width,
  6118. + .reset = sdhci_reset,
  6119. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6120. };
  6121. static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
  6122. @@ -90,8 +90,7 @@
  6123. SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
  6124. SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
  6125. SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
  6126. - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
  6127. - SDHCI_QUIRK_NONSTANDARD_CLOCK,
  6128. + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
  6129. };
  6130. static int sdhci_cns3xxx_probe(struct platform_device *pdev)
  6131. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-dove.c linux-3.15-rc6/drivers/mmc/host/sdhci-dove.c
  6132. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-dove.c 2014-05-21 23:42:02.000000000 +0200
  6133. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-dove.c 2014-05-23 11:26:48.304940032 +0200
  6134. @@ -86,6 +86,10 @@
  6135. static const struct sdhci_ops sdhci_dove_ops = {
  6136. .read_w = sdhci_dove_readw,
  6137. .read_l = sdhci_dove_readl,
  6138. + .set_clock = sdhci_set_clock,
  6139. + .set_bus_width = sdhci_set_bus_width,
  6140. + .reset = sdhci_reset,
  6141. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6142. };
  6143. static const struct sdhci_pltfm_data sdhci_dove_pdata = {
  6144. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-esdhc.h linux-3.15-rc6/drivers/mmc/host/sdhci-esdhc.h
  6145. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-esdhc.h 2014-05-21 23:42:02.000000000 +0200
  6146. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-esdhc.h 2014-05-23 11:26:48.304940032 +0200
  6147. @@ -20,10 +20,8 @@
  6148. #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \
  6149. SDHCI_QUIRK_NO_BUSY_IRQ | \
  6150. - SDHCI_QUIRK_NONSTANDARD_CLOCK | \
  6151. SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
  6152. - SDHCI_QUIRK_PIO_NEEDS_DELAY | \
  6153. - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
  6154. + SDHCI_QUIRK_PIO_NEEDS_DELAY)
  6155. #define ESDHC_SYSTEM_CONTROL 0x2c
  6156. #define ESDHC_CLOCK_MASK 0x0000fff0
  6157. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-esdhc-imx.c linux-3.15-rc6/drivers/mmc/host/sdhci-esdhc-imx.c
  6158. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-esdhc-imx.c 2014-05-21 23:42:02.000000000 +0200
  6159. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-esdhc-imx.c 2014-05-23 11:26:48.304940032 +0200
  6160. @@ -160,7 +160,6 @@
  6161. MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
  6162. WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
  6163. } multiblock_status;
  6164. - u32 uhs_mode;
  6165. u32 is_ddr;
  6166. };
  6167. @@ -382,7 +381,6 @@
  6168. if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
  6169. ret |= SDHCI_CTRL_TUNED_CLK;
  6170. - ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK);
  6171. ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
  6172. return ret;
  6173. @@ -429,7 +427,6 @@
  6174. else
  6175. new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
  6176. writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
  6177. - imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK;
  6178. if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
  6179. new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
  6180. if (val & SDHCI_CTRL_TUNED_CLK)
  6181. @@ -600,12 +597,14 @@
  6182. u32 temp, val;
  6183. if (clock == 0) {
  6184. + host->mmc->actual_clock = 0;
  6185. +
  6186. if (esdhc_is_usdhc(imx_data)) {
  6187. val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
  6188. writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
  6189. host->ioaddr + ESDHC_VENDOR_SPEC);
  6190. }
  6191. - goto out;
  6192. + return;
  6193. }
  6194. if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr)
  6195. @@ -645,8 +644,6 @@
  6196. }
  6197. mdelay(1);
  6198. -out:
  6199. - host->clock = clock;
  6200. }
  6201. static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
  6202. @@ -668,7 +665,7 @@
  6203. return -ENOSYS;
  6204. }
  6205. -static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
  6206. +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
  6207. {
  6208. u32 ctrl;
  6209. @@ -686,8 +683,6 @@
  6210. esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
  6211. SDHCI_HOST_CONTROL);
  6212. -
  6213. - return 0;
  6214. }
  6215. static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
  6216. @@ -697,6 +692,7 @@
  6217. /* FIXME: delay a bit for card to be ready for next tuning due to errors */
  6218. mdelay(1);
  6219. + /* This is balanced by the runtime put in sdhci_tasklet_finish */
  6220. pm_runtime_get_sync(host->mmc->parent);
  6221. reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
  6222. reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
  6223. @@ -713,13 +709,12 @@
  6224. complete(&mrq->completion);
  6225. }
  6226. -static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode)
  6227. +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
  6228. + struct scatterlist *sg)
  6229. {
  6230. struct mmc_command cmd = {0};
  6231. struct mmc_request mrq = {NULL};
  6232. struct mmc_data data = {0};
  6233. - struct scatterlist sg;
  6234. - char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN];
  6235. cmd.opcode = opcode;
  6236. cmd.arg = 0;
  6237. @@ -728,11 +723,9 @@
  6238. data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
  6239. data.blocks = 1;
  6240. data.flags = MMC_DATA_READ;
  6241. - data.sg = &sg;
  6242. + data.sg = sg;
  6243. data.sg_len = 1;
  6244. - sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern));
  6245. -
  6246. mrq.cmd = &cmd;
  6247. mrq.cmd->mrq = &mrq;
  6248. mrq.data = &data;
  6249. @@ -742,14 +735,12 @@
  6250. mrq.done = esdhc_request_done;
  6251. init_completion(&(mrq.completion));
  6252. - disable_irq(host->irq);
  6253. - spin_lock(&host->lock);
  6254. + spin_lock_irq(&host->lock);
  6255. host->mrq = &mrq;
  6256. sdhci_send_command(host, mrq.cmd);
  6257. - spin_unlock(&host->lock);
  6258. - enable_irq(host->irq);
  6259. + spin_unlock_irq(&host->lock);
  6260. wait_for_completion(&mrq.completion);
  6261. @@ -772,13 +763,21 @@
  6262. static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
  6263. {
  6264. + struct scatterlist sg;
  6265. + char *tuning_pattern;
  6266. int min, max, avg, ret;
  6267. + tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
  6268. + if (!tuning_pattern)
  6269. + return -ENOMEM;
  6270. +
  6271. + sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
  6272. +
  6273. /* find the mininum delay first which can pass tuning */
  6274. min = ESDHC_TUNE_CTRL_MIN;
  6275. while (min < ESDHC_TUNE_CTRL_MAX) {
  6276. esdhc_prepare_tuning(host, min);
  6277. - if (!esdhc_send_tuning_cmd(host, opcode))
  6278. + if (!esdhc_send_tuning_cmd(host, opcode, &sg))
  6279. break;
  6280. min += ESDHC_TUNE_CTRL_STEP;
  6281. }
  6282. @@ -787,7 +786,7 @@
  6283. max = min + ESDHC_TUNE_CTRL_STEP;
  6284. while (max < ESDHC_TUNE_CTRL_MAX) {
  6285. esdhc_prepare_tuning(host, max);
  6286. - if (esdhc_send_tuning_cmd(host, opcode)) {
  6287. + if (esdhc_send_tuning_cmd(host, opcode, &sg)) {
  6288. max -= ESDHC_TUNE_CTRL_STEP;
  6289. break;
  6290. }
  6291. @@ -797,9 +796,11 @@
  6292. /* use average delay to get the best timing */
  6293. avg = (min + max) / 2;
  6294. esdhc_prepare_tuning(host, avg);
  6295. - ret = esdhc_send_tuning_cmd(host, opcode);
  6296. + ret = esdhc_send_tuning_cmd(host, opcode, &sg);
  6297. esdhc_post_tuning(host);
  6298. + kfree(tuning_pattern);
  6299. +
  6300. dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
  6301. ret ? "failed" : "passed", avg, ret);
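As a worked example of the min/max/average search above (delay values hypothetical):
if the earliest delay that passes tuning is min = 0x10 and the scan then fails again
beyond max = 0x30, the driver programs avg = (0x10 + 0x30) / 2 = 0x20 and confirms it
with one final tuning command, whose result is what the dev_dbg() above reports.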
  6302. @@ -837,28 +838,20 @@
  6303. return pinctrl_select_state(imx_data->pinctrl, pinctrl);
  6304. }
  6305. -static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
  6306. +static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
  6307. {
  6308. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6309. struct pltfm_imx_data *imx_data = pltfm_host->priv;
  6310. struct esdhc_platform_data *boarddata = &imx_data->boarddata;
  6311. - switch (uhs) {
  6312. + switch (timing) {
  6313. case MMC_TIMING_UHS_SDR12:
  6314. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12;
  6315. - break;
  6316. case MMC_TIMING_UHS_SDR25:
  6317. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25;
  6318. - break;
  6319. case MMC_TIMING_UHS_SDR50:
  6320. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50;
  6321. - break;
  6322. case MMC_TIMING_UHS_SDR104:
  6323. case MMC_TIMING_MMC_HS200:
  6324. - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104;
  6325. break;
  6326. case MMC_TIMING_UHS_DDR50:
  6327. - imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50;
  6328. writel(readl(host->ioaddr + ESDHC_MIX_CTRL) |
  6329. ESDHC_MIX_CTRL_DDREN,
  6330. host->ioaddr + ESDHC_MIX_CTRL);
  6331. @@ -875,7 +868,15 @@
  6332. break;
  6333. }
  6334. - return esdhc_change_pinstate(host, uhs);
  6335. + esdhc_change_pinstate(host, timing);
  6336. +}
  6337. +
  6338. +static void esdhc_reset(struct sdhci_host *host, u8 mask)
  6339. +{
  6340. + sdhci_reset(host, mask);
  6341. +
  6342. + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
  6343. + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
  6344. }
  6345. static struct sdhci_ops sdhci_esdhc_ops = {
  6346. @@ -888,8 +889,9 @@
  6347. .get_max_clock = esdhc_pltfm_get_max_clock,
  6348. .get_min_clock = esdhc_pltfm_get_min_clock,
  6349. .get_ro = esdhc_pltfm_get_ro,
  6350. - .platform_bus_width = esdhc_pltfm_bus_width,
  6351. + .set_bus_width = esdhc_pltfm_set_bus_width,
  6352. .set_uhs_signaling = esdhc_set_uhs_signaling,
  6353. + .reset = esdhc_reset,
  6354. };
  6355. static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
  6356. @@ -1170,8 +1172,10 @@
  6357. ret = sdhci_runtime_suspend_host(host);
  6358. - clk_disable_unprepare(imx_data->clk_per);
  6359. - clk_disable_unprepare(imx_data->clk_ipg);
  6360. + if (!sdhci_sdio_irq_enabled(host)) {
  6361. + clk_disable_unprepare(imx_data->clk_per);
  6362. + clk_disable_unprepare(imx_data->clk_ipg);
  6363. + }
  6364. clk_disable_unprepare(imx_data->clk_ahb);
  6365. return ret;
  6366. @@ -1183,8 +1187,10 @@
  6367. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6368. struct pltfm_imx_data *imx_data = pltfm_host->priv;
  6369. - clk_prepare_enable(imx_data->clk_per);
  6370. - clk_prepare_enable(imx_data->clk_ipg);
  6371. + if (!sdhci_sdio_irq_enabled(host)) {
  6372. + clk_prepare_enable(imx_data->clk_per);
  6373. + clk_prepare_enable(imx_data->clk_ipg);
  6374. + }
  6375. clk_prepare_enable(imx_data->clk_ahb);
  6376. return sdhci_runtime_resume_host(host);
  6377. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci.h linux-3.15-rc6/drivers/mmc/host/sdhci.h
  6378. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci.h 2014-05-21 23:42:02.000000000 +0200
  6379. +++ linux-3.15-rc6/drivers/mmc/host/sdhci.h 2014-05-23 11:26:48.304940032 +0200
  6380. @@ -281,18 +281,14 @@
  6381. unsigned int (*get_max_clock)(struct sdhci_host *host);
  6382. unsigned int (*get_min_clock)(struct sdhci_host *host);
  6383. unsigned int (*get_timeout_clock)(struct sdhci_host *host);
  6384. - int (*platform_bus_width)(struct sdhci_host *host,
  6385. - int width);
  6386. + void (*set_bus_width)(struct sdhci_host *host, int width);
  6387. void (*platform_send_init_74_clocks)(struct sdhci_host *host,
  6388. u8 power_mode);
  6389. unsigned int (*get_ro)(struct sdhci_host *host);
  6390. - void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
  6391. - void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
  6392. + void (*reset)(struct sdhci_host *host, u8 mask);
  6393. int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
  6394. - int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
  6395. + void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
  6396. void (*hw_reset)(struct sdhci_host *host);
  6397. - void (*platform_suspend)(struct sdhci_host *host);
  6398. - void (*platform_resume)(struct sdhci_host *host);
  6399. void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
  6400. void (*platform_init)(struct sdhci_host *host);
  6401. void (*card_event)(struct sdhci_host *host);
  6402. @@ -397,6 +393,16 @@
  6403. extern void sdhci_send_command(struct sdhci_host *host,
  6404. struct mmc_command *cmd);
  6405. +static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
  6406. +{
  6407. + return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
  6408. +}
  6409. +
  6410. +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
  6411. +void sdhci_set_bus_width(struct sdhci_host *host, int width);
  6412. +void sdhci_reset(struct sdhci_host *host, u8 mask);
  6413. +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
  6414. +
  6415. #ifdef CONFIG_PM
  6416. extern int sdhci_suspend_host(struct sdhci_host *host);
  6417. extern int sdhci_resume_host(struct sdhci_host *host);
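These header changes are what let the glue drivers in this patch shrink: the core's
sdhci_set_clock(), sdhci_set_bus_width(), sdhci_reset() and sdhci_set_uhs_signaling()
are now exported, and the old platform_bus_width/platform_reset_enter/exit and
platform_suspend/resume hooks are gone. A controller with no special requirements can
fill its ops table entirely from the library, as sdhci-pltfm.c does below; for a
hypothetical glue driver this is just:

	static const struct sdhci_ops demo_sdhci_ops = {
		.set_clock		= sdhci_set_clock,
		.set_bus_width		= sdhci_set_bus_width,
		.reset			= sdhci_reset,
		.set_uhs_signaling	= sdhci_set_uhs_signaling,
	};

Drivers that do need controller-specific work wrap the helper instead, as pxav2,
pxav3 and tegra do by calling sdhci_reset() from their own .reset implementations.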
  6418. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-of-arasan.c linux-3.15-rc6/drivers/mmc/host/sdhci-of-arasan.c
  6419. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-of-arasan.c 2014-05-21 23:42:02.000000000 +0200
  6420. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-of-arasan.c 2014-05-23 11:26:48.304940032 +0200
  6421. @@ -52,8 +52,12 @@
  6422. }
  6423. static struct sdhci_ops sdhci_arasan_ops = {
  6424. + .set_clock = sdhci_set_clock,
  6425. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  6426. .get_timeout_clock = sdhci_arasan_get_timeout_clock,
  6427. + .set_bus_width = sdhci_set_bus_width,
  6428. + .reset = sdhci_reset,
  6429. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6430. };
  6431. static struct sdhci_pltfm_data sdhci_arasan_pdata = {
  6432. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-of-esdhc.c linux-3.15-rc6/drivers/mmc/host/sdhci-of-esdhc.c
  6433. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-of-esdhc.c 2014-05-21 23:42:02.000000000 +0200
  6434. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-of-esdhc.c 2014-05-23 11:26:48.304940032 +0200
  6435. @@ -199,13 +199,14 @@
  6436. static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
  6437. {
  6438. -
  6439. int pre_div = 2;
  6440. int div = 1;
  6441. u32 temp;
  6442. + host->mmc->actual_clock = 0;
  6443. +
  6444. if (clock == 0)
  6445. - goto out;
  6446. + return;
  6447. /* Workaround to reduce the clock frequency for p1010 esdhc */
  6448. if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) {
  6449. @@ -238,24 +239,8 @@
  6450. | (pre_div << ESDHC_PREDIV_SHIFT));
  6451. sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
  6452. mdelay(1);
  6453. -out:
  6454. - host->clock = clock;
  6455. }
  6456. -#ifdef CONFIG_PM
  6457. -static u32 esdhc_proctl;
  6458. -static void esdhc_of_suspend(struct sdhci_host *host)
  6459. -{
  6460. - esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
  6461. -}
  6462. -
  6463. -static void esdhc_of_resume(struct sdhci_host *host)
  6464. -{
  6465. - esdhc_of_enable_dma(host);
  6466. - sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
  6467. -}
  6468. -#endif
  6469. -
  6470. static void esdhc_of_platform_init(struct sdhci_host *host)
  6471. {
  6472. u32 vvn;
  6473. @@ -269,7 +254,7 @@
  6474. host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
  6475. }
  6476. -static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width)
  6477. +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
  6478. {
  6479. u32 ctrl;
  6480. @@ -289,8 +274,6 @@
  6481. clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
  6482. ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
  6483. -
  6484. - return 0;
  6485. }
  6486. static const struct sdhci_ops sdhci_esdhc_ops = {
  6487. @@ -305,13 +288,46 @@
  6488. .get_max_clock = esdhc_of_get_max_clock,
  6489. .get_min_clock = esdhc_of_get_min_clock,
  6490. .platform_init = esdhc_of_platform_init,
  6491. -#ifdef CONFIG_PM
  6492. - .platform_suspend = esdhc_of_suspend,
  6493. - .platform_resume = esdhc_of_resume,
  6494. -#endif
  6495. .adma_workaround = esdhci_of_adma_workaround,
  6496. - .platform_bus_width = esdhc_pltfm_bus_width,
  6497. + .set_bus_width = esdhc_pltfm_set_bus_width,
  6498. + .reset = sdhci_reset,
  6499. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6500. +};
  6501. +
  6502. +#ifdef CONFIG_PM
  6503. +
  6504. +static u32 esdhc_proctl;
  6505. +static int esdhc_of_suspend(struct device *dev)
  6506. +{
  6507. + struct sdhci_host *host = dev_get_drvdata(dev);
  6508. +
  6509. + esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
  6510. +
  6511. + return sdhci_suspend_host(host);
  6512. +}
  6513. +
  6514. +static int esdhc_of_resume(struct device *dev)
  6515. +{
  6516. + struct sdhci_host *host = dev_get_drvdata(dev);
  6517. + int ret = sdhci_resume_host(host);
  6518. +
  6519. + if (ret == 0) {
  6520. + /* Isn't this already done by sdhci_resume_host() ? --rmk */
  6521. + esdhc_of_enable_dma(host);
  6522. + sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
  6523. + }
  6524. +
  6525. + return ret;
  6526. +}
  6527. +
  6528. +static const struct dev_pm_ops esdhc_pmops = {
  6529. + .suspend = esdhc_of_suspend,
  6530. + .resume = esdhc_of_resume,
  6531. };
  6532. +#define ESDHC_PMOPS (&esdhc_pmops)
  6533. +#else
  6534. +#define ESDHC_PMOPS NULL
  6535. +#endif
  6536. static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
  6537. /*
  6538. @@ -374,7 +390,7 @@
  6539. .name = "sdhci-esdhc",
  6540. .owner = THIS_MODULE,
  6541. .of_match_table = sdhci_esdhc_of_match,
  6542. - .pm = SDHCI_PLTFM_PMOPS,
  6543. + .pm = ESDHC_PMOPS,
  6544. },
  6545. .probe = sdhci_esdhc_probe,
  6546. .remove = sdhci_esdhc_remove,
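The open-coded dev_pm_ops above only fills the .suspend and .resume slots. An
equivalent, slightly more conventional form would be the SIMPLE_DEV_PM_OPS() helper,
which wires the same pair into the freeze/thaw/poweroff/restore slots as well and
drops them automatically when CONFIG_PM_SLEEP is not set; shown only as a sketch of
the alternative:

	static SIMPLE_DEV_PM_OPS(esdhc_pmops, esdhc_of_suspend, esdhc_of_resume);
	#define ESDHC_PMOPS (&esdhc_pmops)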
  6547. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-of-hlwd.c linux-3.15-rc6/drivers/mmc/host/sdhci-of-hlwd.c
  6548. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-of-hlwd.c 2014-05-21 23:42:02.000000000 +0200
  6549. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-of-hlwd.c 2014-05-23 11:26:48.304940032 +0200
  6550. @@ -58,6 +58,10 @@
  6551. .write_l = sdhci_hlwd_writel,
  6552. .write_w = sdhci_hlwd_writew,
  6553. .write_b = sdhci_hlwd_writeb,
  6554. + .set_clock = sdhci_set_clock,
  6555. + .set_bus_width = sdhci_set_bus_width,
  6556. + .reset = sdhci_reset,
  6557. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6558. };
  6559. static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
  6560. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pci.c linux-3.15-rc6/drivers/mmc/host/sdhci-pci.c
  6561. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pci.c 2014-05-21 23:42:02.000000000 +0200
  6562. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-pci.c 2014-05-23 11:26:48.304940032 +0200
  6563. @@ -1031,7 +1031,7 @@
  6564. return 0;
  6565. }
  6566. -static int sdhci_pci_bus_width(struct sdhci_host *host, int width)
  6567. +static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width)
  6568. {
  6569. u8 ctrl;
  6570. @@ -1052,8 +1052,6 @@
  6571. }
  6572. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  6573. -
  6574. - return 0;
  6575. }
  6576. static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
  6577. @@ -1080,8 +1078,11 @@
  6578. }
  6579. static const struct sdhci_ops sdhci_pci_ops = {
  6580. + .set_clock = sdhci_set_clock,
  6581. .enable_dma = sdhci_pci_enable_dma,
  6582. - .platform_bus_width = sdhci_pci_bus_width,
  6583. + .set_bus_width = sdhci_pci_set_bus_width,
  6584. + .reset = sdhci_reset,
  6585. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6586. .hw_reset = sdhci_pci_hw_reset,
  6587. };
  6588. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pltfm.c linux-3.15-rc6/drivers/mmc/host/sdhci-pltfm.c
  6589. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pltfm.c 2014-05-21 23:42:02.000000000 +0200
  6590. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-pltfm.c 2014-05-23 11:26:48.308940045 +0200
  6591. @@ -45,6 +45,10 @@
  6592. EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock);
  6593. static const struct sdhci_ops sdhci_pltfm_ops = {
  6594. + .set_clock = sdhci_set_clock,
  6595. + .set_bus_width = sdhci_set_bus_width,
  6596. + .reset = sdhci_reset,
  6597. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6598. };
  6599. #ifdef CONFIG_OF
  6600. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pxav2.c linux-3.15-rc6/drivers/mmc/host/sdhci-pxav2.c
  6601. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pxav2.c 2014-05-21 23:42:02.000000000 +0200
  6602. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-pxav2.c 2014-05-23 11:26:48.308940045 +0200
  6603. @@ -51,11 +51,13 @@
  6604. #define MMC_CARD 0x1000
  6605. #define MMC_WIDTH 0x0100
  6606. -static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
  6607. +static void pxav2_reset(struct sdhci_host *host, u8 mask)
  6608. {
  6609. struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
  6610. struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
  6611. + sdhci_reset(host, mask);
  6612. +
  6613. if (mask == SDHCI_RESET_ALL) {
  6614. u16 tmp = 0;
  6615. @@ -88,7 +90,7 @@
  6616. }
  6617. }
  6618. -static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
  6619. +static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width)
  6620. {
  6621. u8 ctrl;
  6622. u16 tmp;
  6623. @@ -107,14 +109,14 @@
  6624. }
  6625. writew(tmp, host->ioaddr + SD_CE_ATA_2);
  6626. writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
  6627. -
  6628. - return 0;
  6629. }
  6630. static const struct sdhci_ops pxav2_sdhci_ops = {
  6631. + .set_clock = sdhci_set_clock,
  6632. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  6633. - .platform_reset_exit = pxav2_set_private_registers,
  6634. - .platform_bus_width = pxav2_mmc_set_width,
  6635. + .set_bus_width = pxav2_mmc_set_bus_width,
  6636. + .reset = pxav2_reset,
  6637. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6638. };
  6639. #ifdef CONFIG_OF
  6640. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pxav3.c linux-3.15-rc6/drivers/mmc/host/sdhci-pxav3.c
  6641. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-pxav3.c 2014-05-21 23:42:02.000000000 +0200
  6642. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-pxav3.c 2014-05-23 11:26:48.308940045 +0200
  6643. @@ -112,11 +112,13 @@
  6644. return 0;
  6645. }
  6646. -static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
  6647. +static void pxav3_reset(struct sdhci_host *host, u8 mask)
  6648. {
  6649. struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
  6650. struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
  6651. + sdhci_reset(host, mask);
  6652. +
  6653. if (mask == SDHCI_RESET_ALL) {
  6654. /*
  6655. * tune timing of read data/command when crc error happen
  6656. @@ -184,7 +186,7 @@
  6657. pxa->power_mode = power_mode;
  6658. }
  6659. -static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
  6660. +static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
  6661. {
  6662. u16 ctrl_2;
  6663. @@ -218,15 +220,16 @@
  6664. dev_dbg(mmc_dev(host->mmc),
  6665. "%s uhs = %d, ctrl_2 = %04X\n",
  6666. __func__, uhs, ctrl_2);
  6667. -
  6668. - return 0;
  6669. }
  6670. static const struct sdhci_ops pxav3_sdhci_ops = {
  6671. - .platform_reset_exit = pxav3_set_private_registers,
  6672. + .set_clock = sdhci_set_clock,
  6673. .set_uhs_signaling = pxav3_set_uhs_signaling,
  6674. .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
  6675. .get_max_clock = sdhci_pltfm_clk_get_max_clock,
  6676. + .set_bus_width = sdhci_set_bus_width,
  6677. + .reset = pxav3_reset,
  6679. };
  6680. static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
  6681. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-s3c.c linux-3.15-rc6/drivers/mmc/host/sdhci-s3c.c
  6682. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-s3c.c 2014-05-21 23:42:02.000000000 +0200
  6683. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-s3c.c 2014-05-23 11:26:48.308940045 +0200
  6684. @@ -58,6 +58,8 @@
  6685. struct clk *clk_io;
  6686. struct clk *clk_bus[MAX_BUS_CLK];
  6687. unsigned long clk_rates[MAX_BUS_CLK];
  6688. +
  6689. + bool no_divider;
  6690. };
  6691. /**
  6692. @@ -70,6 +72,7 @@
  6693. */
  6694. struct sdhci_s3c_drv_data {
  6695. unsigned int sdhci_quirks;
  6696. + bool no_divider;
  6697. };
  6698. static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
  6699. @@ -119,7 +122,7 @@
  6700. * If controller uses a non-standard clock division, find the best clock
  6701. * speed possible with selected clock source and skip the division.
  6702. */
  6703. - if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
  6704. + if (ourhost->no_divider) {
  6705. rate = clk_round_rate(clksrc, wanted);
  6706. return wanted - rate;
  6707. }
  6708. @@ -161,9 +164,13 @@
  6709. int src;
  6710. u32 ctrl;
  6711. + host->mmc->actual_clock = 0;
  6712. +
  6713. /* don't bother if the clock is going off. */
  6714. - if (clock == 0)
  6715. + if (clock == 0) {
  6716. + sdhci_set_clock(host, clock);
  6717. return;
  6718. + }
  6719. for (src = 0; src < MAX_BUS_CLK; src++) {
  6720. delta = sdhci_s3c_consider_clock(ourhost, src, clock);
  6721. @@ -215,6 +222,8 @@
  6722. if (clock < 25 * 1000000)
  6723. ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2);
  6724. writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3);
  6725. +
  6726. + sdhci_set_clock(host, clock);
  6727. }
  6728. /**
  6729. @@ -295,10 +304,11 @@
  6730. unsigned long timeout;
  6731. u16 clk = 0;
  6732. + host->mmc->actual_clock = 0;
  6733. +
  6734. /* If the clock is going off, set to 0 at clock control register */
  6735. if (clock == 0) {
  6736. sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
  6737. - host->clock = clock;
  6738. return;
  6739. }
  6740. @@ -306,8 +316,6 @@
  6741. clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
  6742. - host->clock = clock;
  6743. -
  6744. clk = SDHCI_CLOCK_INT_EN;
  6745. sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
  6746. @@ -329,14 +337,14 @@
  6747. }
  6748. /**
  6749. - * sdhci_s3c_platform_bus_width - support 8bit buswidth
  6750. + * sdhci_s3c_set_bus_width - support 8bit buswidth
  6751. * @host: The SDHCI host being queried
  6752. * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
  6753. *
  6754. * We have 8-bit width support but is not a v3 controller.
  6755. * So we add platform_bus_width() and support 8bit width.
  6756. */
  6757. -static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width)
  6758. +static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width)
  6759. {
  6760. u8 ctrl;
  6761. @@ -358,15 +366,15 @@
  6762. }
  6763. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  6764. -
  6765. - return 0;
  6766. }
  6767. static struct sdhci_ops sdhci_s3c_ops = {
  6768. .get_max_clock = sdhci_s3c_get_max_clk,
  6769. .set_clock = sdhci_s3c_set_clock,
  6770. .get_min_clock = sdhci_s3c_get_min_clock,
  6771. - .platform_bus_width = sdhci_s3c_platform_bus_width,
  6772. + .set_bus_width = sdhci_s3c_set_bus_width,
  6773. + .reset = sdhci_reset,
  6774. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6775. };
  6776. static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
  6777. @@ -606,8 +614,10 @@
  6778. /* Setup quirks for the controller */
  6779. host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
  6780. host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
  6781. - if (drv_data)
  6782. + if (drv_data) {
  6783. host->quirks |= drv_data->sdhci_quirks;
  6784. + sc->no_divider = drv_data->no_divider;
  6785. + }
  6786. #ifndef CONFIG_MMC_SDHCI_S3C_DMA
  6787. @@ -656,7 +666,7 @@
  6788. * If controller does not have internal clock divider,
  6789. * we can use overriding functions instead of default.
  6790. */
  6791. - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
  6792. + if (sc->no_divider) {
  6793. sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
  6794. sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
  6795. sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
  6796. @@ -797,7 +807,7 @@
  6797. #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
  6798. static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
  6799. - .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK,
  6800. + .no_divider = true,
  6801. };
  6802. #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
  6803. #else
  6804. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-sirf.c linux-3.15-rc6/drivers/mmc/host/sdhci-sirf.c
  6805. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-sirf.c 2014-05-21 23:42:02.000000000 +0200
  6806. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-sirf.c 2014-05-23 11:26:48.308940045 +0200
  6807. @@ -28,7 +28,11 @@
  6808. }
  6809. static struct sdhci_ops sdhci_sirf_ops = {
  6810. + .set_clock = sdhci_set_clock,
  6811. .get_max_clock = sdhci_sirf_get_max_clk,
  6812. + .set_bus_width = sdhci_set_bus_width,
  6813. + .reset = sdhci_reset,
  6814. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6815. };
  6816. static struct sdhci_pltfm_data sdhci_sirf_pdata = {
  6817. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-spear.c linux-3.15-rc6/drivers/mmc/host/sdhci-spear.c
  6818. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-spear.c 2014-05-21 23:42:02.000000000 +0200
  6819. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-spear.c 2014-05-23 11:26:48.308940045 +0200
  6820. @@ -38,7 +38,10 @@
  6821. /* sdhci ops */
  6822. static const struct sdhci_ops sdhci_pltfm_ops = {
  6823. - /* Nothing to do for now. */
  6824. + .set_clock = sdhci_set_clock,
  6825. + .set_bus_width = sdhci_set_bus_width,
  6826. + .reset = sdhci_reset,
  6827. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6828. };
  6829. #ifdef CONFIG_OF
  6830. diff -Nur linux-3.15-rc6.orig/drivers/mmc/host/sdhci-tegra.c linux-3.15-rc6/drivers/mmc/host/sdhci-tegra.c
  6831. --- linux-3.15-rc6.orig/drivers/mmc/host/sdhci-tegra.c 2014-05-21 23:42:02.000000000 +0200
  6832. +++ linux-3.15-rc6/drivers/mmc/host/sdhci-tegra.c 2014-05-23 11:26:48.308940045 +0200
  6833. @@ -48,19 +48,6 @@
  6834. int power_gpio;
  6835. };
  6836. -static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
  6837. -{
  6838. - u32 val;
  6839. -
  6840. - if (unlikely(reg == SDHCI_PRESENT_STATE)) {
  6841. - /* Use wp_gpio here instead? */
  6842. - val = readl(host->ioaddr + reg);
  6843. - return val | SDHCI_WRITE_PROTECT;
  6844. - }
  6845. -
  6846. - return readl(host->ioaddr + reg);
  6847. -}
  6848. -
  6849. static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
  6850. {
  6851. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6852. @@ -108,12 +95,14 @@
  6853. return mmc_gpio_get_ro(host->mmc);
  6854. }
  6855. -static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
  6856. +static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
  6857. {
  6858. struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
  6859. struct sdhci_tegra *tegra_host = pltfm_host->priv;
  6860. const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
  6861. + sdhci_reset(host, mask);
  6862. +
  6863. if (!(mask & SDHCI_RESET_ALL))
  6864. return;
  6865. @@ -127,7 +116,7 @@
  6866. }
  6867. }
  6868. -static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width)
  6869. +static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
  6870. {
  6871. u32 ctrl;
  6872. @@ -144,16 +133,16 @@
  6873. ctrl &= ~SDHCI_CTRL_4BITBUS;
  6874. }
  6875. sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
  6876. - return 0;
  6877. }
  6878. static const struct sdhci_ops tegra_sdhci_ops = {
  6879. .get_ro = tegra_sdhci_get_ro,
  6880. - .read_l = tegra_sdhci_readl,
  6881. .read_w = tegra_sdhci_readw,
  6882. .write_l = tegra_sdhci_writel,
  6883. - .platform_bus_width = tegra_sdhci_buswidth,
  6884. - .platform_reset_exit = tegra_sdhci_reset_exit,
  6885. + .set_clock = sdhci_set_clock,
  6886. + .set_bus_width = tegra_sdhci_set_bus_width,
  6887. + .reset = tegra_sdhci_reset,
  6888. + .set_uhs_signaling = sdhci_set_uhs_signaling,
  6889. };
  6890. static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
  6891. diff -Nur linux-3.15-rc6.orig/drivers/net/ethernet/freescale/fec.h linux-3.15-rc6/drivers/net/ethernet/freescale/fec.h
  6892. --- linux-3.15-rc6.orig/drivers/net/ethernet/freescale/fec.h 2014-05-21 23:42:02.000000000 +0200
  6893. +++ linux-3.15-rc6/drivers/net/ethernet/freescale/fec.h 2014-05-23 11:26:48.308940045 +0200
  6894. @@ -14,6 +14,7 @@
  6895. /****************************************************************************/
  6896. #include <linux/clocksource.h>
  6897. +#include <linux/mutex.h>
  6898. #include <linux/net_tstamp.h>
  6899. #include <linux/ptp_clock_kernel.h>
  6900. @@ -170,6 +171,11 @@
  6901. unsigned short res0[4];
  6902. };
  6903. +union bufdesc_u {
  6904. + struct bufdesc bd;
  6905. + struct bufdesc_ex ebd;
  6906. +};
  6907. +
  6908. /*
  6909. * The following definitions courtesy of commproc.h, which where
  6910. * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
  6911. @@ -202,6 +208,7 @@
  6912. #define BD_ENET_RX_OV ((ushort)0x0002)
  6913. #define BD_ENET_RX_CL ((ushort)0x0001)
  6914. #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
  6915. +#define BD_ENET_RX_ERROR ((ushort)0x003f)
  6916. /* Enhanced buffer descriptor control/status used by Ethernet receive */
  6917. #define BD_ENET_RX_VLAN 0x00000004
  6918. @@ -224,10 +231,17 @@
  6919. #define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
  6920. /*enhanced buffer descriptor control/status used by Ethernet transmit*/
  6921. -#define BD_ENET_TX_INT 0x40000000
  6922. -#define BD_ENET_TX_TS 0x20000000
  6923. -#define BD_ENET_TX_PINS 0x10000000
  6924. -#define BD_ENET_TX_IINS 0x08000000
  6925. +#define BD_ENET_TX_INT BIT(30)
  6926. +#define BD_ENET_TX_TS BIT(29)
  6927. +#define BD_ENET_TX_PINS BIT(28)
  6928. +#define BD_ENET_TX_IINS BIT(27)
  6929. +#define BD_ENET_TX_TXE BIT(15)
  6930. +#define BD_ENET_TX_UE BIT(13)
  6931. +#define BD_ENET_TX_EE BIT(12)
  6932. +#define BD_ENET_TX_FE BIT(11)
  6933. +#define BD_ENET_TX_LCE BIT(10)
  6934. +#define BD_ENET_TX_OE BIT(9)
  6935. +#define BD_ENET_TX_TSE BIT(8)
  6936. /* This device has up to three irqs on some platforms */
  6937. @@ -240,28 +254,20 @@
  6938. * the skbuffer directly.
  6939. */
  6940. -#define FEC_ENET_RX_PAGES 8
  6941. +#define FEC_ENET_RX_PAGES 64
  6942. #define FEC_ENET_RX_FRSIZE 2048
  6943. #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
  6944. #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
  6945. #define FEC_ENET_TX_FRSIZE 2048
  6946. #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
  6947. -#define TX_RING_SIZE 16 /* Must be power of two */
  6948. -#define TX_RING_MOD_MASK 15 /* for this to work */
  6949. +#define TX_RING_SIZE 128 /* Must be power of two */
  6950. #define BD_ENET_RX_INT 0x00800000
  6951. #define BD_ENET_RX_PTP ((ushort)0x0400)
  6952. #define BD_ENET_RX_ICE 0x00000020
  6953. #define BD_ENET_RX_PCR 0x00000010
  6954. -#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
  6955. #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
  6956. -struct fec_enet_delayed_work {
  6957. - struct delayed_work delay_work;
  6958. - bool timeout;
  6959. - bool trig_tx;
  6960. -};
  6961. -
  6962. /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
  6963. * tx_bd_base always point to the base of the buffer descriptors. The
  6964. * cur_rx and cur_tx point to the currently available buffer.
  6965. @@ -281,27 +287,33 @@
  6966. struct clk *clk_enet_out;
  6967. struct clk *clk_ptp;
  6968. + unsigned char tx_page_map[TX_RING_SIZE];
  6969. /* The saved address of a sent-in-place packet/buffer, for skfree(). */
  6970. unsigned char *tx_bounce[TX_RING_SIZE];
  6971. struct sk_buff *tx_skbuff[TX_RING_SIZE];
  6972. struct sk_buff *rx_skbuff[RX_RING_SIZE];
  6973. /* CPM dual port RAM relative addresses */
  6974. - dma_addr_t bd_dma;
  6975. + dma_addr_t rx_bd_dma;
  6976. + dma_addr_t tx_bd_dma;
  6977. /* Address of Rx and Tx buffers */
  6978. - struct bufdesc *rx_bd_base;
  6979. - struct bufdesc *tx_bd_base;
  6980. + union bufdesc_u *rx_bd_base;
  6981. + union bufdesc_u *tx_bd_base;
  6982. /* The next free ring entry */
  6983. - struct bufdesc *cur_rx, *cur_tx;
  6984. - /* The ring entries to be free()ed */
  6985. - struct bufdesc *dirty_tx;
  6986. + unsigned short tx_next;
  6987. + unsigned short tx_dirty;
  6988. + unsigned short tx_min;
  6989. + unsigned short rx_next;
  6990. unsigned short tx_ring_size;
  6991. unsigned short rx_ring_size;
  6992. + unsigned char flags;
  6993. +
  6994. + struct mutex mutex;
  6995. +
  6996. struct platform_device *pdev;
  6997. - int opened;
  6998. int dev_id;
  6999. /* Phylib and MDIO interface */
  7000. @@ -315,11 +327,12 @@
  7001. int speed;
  7002. struct completion mdio_done;
  7003. int irq[FEC_IRQ_NUM];
  7004. - int bufdesc_ex;
  7005. - int pause_flag;
  7006. + unsigned short pause_flag;
  7007. + unsigned short pause_mode;
  7008. struct napi_struct napi;
  7009. - int csum_flags;
  7010. +
  7011. + struct work_struct tx_timeout_work;
  7012. struct ptp_clock *ptp_clock;
  7013. struct ptp_clock_info ptp_caps;
  7014. @@ -333,8 +346,8 @@
  7015. int hwts_rx_en;
  7016. int hwts_tx_en;
  7017. struct timer_list time_keep;
  7018. - struct fec_enet_delayed_work delay_work;
  7019. struct regulator *reg_phy;
  7020. + unsigned long quirks;
  7021. };
  7022. void fec_ptp_init(struct platform_device *pdev);
  7023. diff -Nur linux-3.15-rc6.orig/drivers/net/ethernet/freescale/fec_main.c linux-3.15-rc6/drivers/net/ethernet/freescale/fec_main.c
  7024. --- linux-3.15-rc6.orig/drivers/net/ethernet/freescale/fec_main.c 2014-05-21 23:42:02.000000000 +0200
  7025. +++ linux-3.15-rc6/drivers/net/ethernet/freescale/fec_main.c 2014-05-23 11:26:48.312940058 +0200
  7026. @@ -33,12 +33,6 @@
  7027. #include <linux/netdevice.h>
  7028. #include <linux/etherdevice.h>
  7029. #include <linux/skbuff.h>
  7030. -#include <linux/in.h>
  7031. -#include <linux/ip.h>
  7032. -#include <net/ip.h>
  7033. -#include <linux/tcp.h>
  7034. -#include <linux/udp.h>
  7035. -#include <linux/icmp.h>
  7036. #include <linux/spinlock.h>
  7037. #include <linux/workqueue.h>
  7038. #include <linux/bitops.h>
  7039. @@ -91,16 +85,8 @@
  7040. #define FEC_QUIRK_HAS_CSUM (1 << 5)
  7041. /* Controller has hardware vlan support */
  7042. #define FEC_QUIRK_HAS_VLAN (1 << 6)
  7043. -/* ENET IP errata ERR006358
  7044. - *
  7045. - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
  7046. - * detected as not set during a prior frame transmission, then the
  7047. - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
  7048. - * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
  7049. - * frames not being transmitted until there is a 0-to-1 transition on
  7050. - * ENET_TDAR[TDAR].
  7051. - */
  7052. -#define FEC_QUIRK_ERR006358 (1 << 7)
  7053. +/* Controller has ability to offset rx packets */
  7054. +#define FEC_QUIRK_RX_SHIFT16 (1 << 8)
  7055. static struct platform_device_id fec_devtype[] = {
  7056. {
  7057. @@ -120,7 +106,7 @@
  7058. .name = "imx6q-fec",
  7059. .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
  7060. FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
  7061. - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
  7062. + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_RX_SHIFT16,
  7063. }, {
  7064. .name = "mvf600-fec",
  7065. .driver_data = FEC_QUIRK_ENET_MAC,
  7066. @@ -172,9 +158,15 @@
  7067. #endif
  7068. #endif /* CONFIG_M5272 */
  7069. -#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
  7070. -#error "FEC: descriptor ring size constants too large"
  7071. +#if RX_RING_SIZE * 32 > PAGE_SIZE
  7072. +#error "FEC: receive descriptor ring size too large"
  7073. #endif
  7074. +#if TX_RING_SIZE * 32 > PAGE_SIZE
  7075. +#error "FEC: transmit descriptor ring size too large"
  7076. +#endif
  7077. +
  7078. +/* Minimum TX ring size when using NETIF_F_SG */
  7079. +#define TX_RING_SIZE_MIN_SG (2 * (MAX_SKB_FRAGS + 1))
  7080. /* Interrupt events/masks. */
  7081. #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
  7082. @@ -200,6 +192,7 @@
  7083. /* FEC receive acceleration */
  7084. #define FEC_RACC_IPDIS (1 << 1)
  7085. #define FEC_RACC_PRODIS (1 << 2)
  7086. +#define FEC_RACC_SHIFT16 BIT(7)
  7087. #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
  7088. /*
  7089. @@ -228,62 +221,60 @@
  7090. /* Transmitter timeout */
  7091. #define TX_TIMEOUT (2 * HZ)
  7092. -#define FEC_PAUSE_FLAG_AUTONEG 0x1
  7093. -#define FEC_PAUSE_FLAG_ENABLE 0x2
  7094. +/* pause mode/flag */
  7095. +#define FEC_PAUSE_FLAG_AUTONEG BIT(0)
  7096. +#define FEC_PAUSE_FLAG_RX BIT(1)
  7097. +#define FEC_PAUSE_FLAG_TX BIT(2)
  7098. +
  7099. +/* flags */
  7100. +#define FEC_FLAG_BUFDESC_EX BIT(0)
  7101. +#define FEC_FLAG_RX_CSUM BIT(1)
  7102. +#define FEC_FLAG_RX_VLAN BIT(2)
  7103. static int mii_cnt;
  7104. -static inline
  7105. -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
  7106. +static unsigned copybreak = 200;
  7107. +module_param(copybreak, uint, 0644);
  7108. +MODULE_PARM_DESC(copybreak,
  7109. + "Maximum size of packet that is copied to a new buffer on receive");
  7110. +
  7111. +static bool fec_enet_rx_zerocopy(struct fec_enet_private *fep, unsigned pktlen)
  7112. {
  7113. - struct bufdesc *new_bd = bdp + 1;
  7114. - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
  7115. - struct bufdesc_ex *ex_base;
  7116. - struct bufdesc *base;
  7117. - int ring_size;
  7118. -
  7119. - if (bdp >= fep->tx_bd_base) {
  7120. - base = fep->tx_bd_base;
  7121. - ring_size = fep->tx_ring_size;
  7122. - ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
  7123. - } else {
  7124. - base = fep->rx_bd_base;
  7125. - ring_size = fep->rx_ring_size;
  7126. - ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
  7127. - }
  7128. +#ifndef CONFIG_M5272
  7129. + if (fep->quirks & FEC_QUIRK_RX_SHIFT16 && pktlen >= copybreak)
  7130. + return true;
  7131. +#endif
  7132. + return false;
  7133. +}
  7134. - if (fep->bufdesc_ex)
  7135. - return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
  7136. - ex_base : ex_new_bd);
  7137. +static union bufdesc_u *
  7138. +fec_enet_tx_get(unsigned index, struct fec_enet_private *fep)
  7139. +{
  7140. + union bufdesc_u *base = fep->tx_bd_base;
  7141. + union bufdesc_u *bdp;
  7142. +
  7143. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7144. + bdp = (union bufdesc_u *)(&base->ebd + index);
  7145. else
  7146. - return (new_bd >= (base + ring_size)) ?
  7147. - base : new_bd;
  7148. + bdp = (union bufdesc_u *)(&base->bd + index);
  7149. +
  7150. + return bdp;
  7151. }
  7152. -static inline
  7153. -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
  7154. +static union bufdesc_u *
  7155. +fec_enet_rx_get(unsigned index, struct fec_enet_private *fep)
  7156. {
  7157. - struct bufdesc *new_bd = bdp - 1;
  7158. - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
  7159. - struct bufdesc_ex *ex_base;
  7160. - struct bufdesc *base;
  7161. - int ring_size;
  7162. -
  7163. - if (bdp >= fep->tx_bd_base) {
  7164. - base = fep->tx_bd_base;
  7165. - ring_size = fep->tx_ring_size;
  7166. - ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
  7167. - } else {
  7168. - base = fep->rx_bd_base;
  7169. - ring_size = fep->rx_ring_size;
  7170. - ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
  7171. - }
  7172. + union bufdesc_u *base = fep->rx_bd_base;
  7173. + union bufdesc_u *bdp;
  7174. +
  7175. + index &= fep->rx_ring_size - 1;
  7176. - if (fep->bufdesc_ex)
  7177. - return (struct bufdesc *)((ex_new_bd < ex_base) ?
  7178. - (ex_new_bd + ring_size) : ex_new_bd);
  7179. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7180. + bdp = (union bufdesc_u *)(&base->ebd + index);
  7181. else
  7182. - return (new_bd < base) ? (new_bd + ring_size) : new_bd;
  7183. + bdp = (union bufdesc_u *)(&base->bd + index);
  7184. +
  7185. + return bdp;
  7186. }
  7187. static void *swap_buffer(void *bufaddr, int len)
  7188. @@ -297,13 +288,47 @@
  7189. return bufaddr;
  7190. }
  7191. +static void fec_dump(struct net_device *ndev)
  7192. +{
  7193. + struct fec_enet_private *fep = netdev_priv(ndev);
  7194. + union bufdesc_u *bdp;
  7195. + unsigned index = 0;
  7196. +
  7197. + netdev_info(ndev, "TX ring dump\n");
  7198. + pr_info("Nr SC addr len SKB\n");
  7199. +
  7200. + for (index = 0; index < fep->tx_ring_size; index++) {
  7201. + bdp = fec_enet_tx_get(index, fep);
  7202. +
  7203. + pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p",
  7204. + index,
  7205. + index == fep->tx_next ? 'S' : ' ',
  7206. + index == fep->tx_dirty ? 'H' : ' ',
  7207. + bdp->bd.cbd_sc, bdp->bd.cbd_bufaddr,
  7208. + bdp->bd.cbd_datlen,
  7209. + fep->tx_skbuff[index]);
  7210. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7211. + pr_cont(" %08lx", bdp->ebd.cbd_esc);
  7212. + pr_cont("\n");
  7213. + }
  7214. +}
  7215. +
  7216. static int
  7217. fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
  7218. {
  7219. + int csum_start;
  7220. +
  7221. /* Only run for packets requiring a checksum. */
  7222. if (skb->ip_summed != CHECKSUM_PARTIAL)
  7223. return 0;
  7224. + csum_start = skb_checksum_start_offset(skb);
  7225. + if (csum_start + skb->csum_offset > skb_headlen(skb)) {
  7226. + netdev_err(ndev, "checksum outside skb head: headlen %u start %u offset %u\n",
  7227. + skb_headlen(skb), csum_start, skb->csum_offset);
  7228. + return -1;
  7229. + }
  7230. +
  7231. if (unlikely(skb_cow_head(skb, 0)))
  7232. return -1;
  7233. @@ -312,23 +337,56 @@
  7234. return 0;
  7235. }
  7236. +static void
  7237. +fec_enet_tx_unmap(unsigned index, union bufdesc_u *bdp, struct fec_enet_private *fep)
  7238. +{
  7239. + dma_addr_t addr = bdp->bd.cbd_bufaddr;
  7240. + unsigned length = bdp->bd.cbd_datlen;
  7241. +
  7242. + bdp->bd.cbd_bufaddr = 0;
  7243. +
  7244. + if (fep->tx_page_map[index])
  7245. + dma_unmap_page(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
  7246. + else
  7247. + dma_unmap_single(&fep->pdev->dev, addr, length, DMA_TO_DEVICE);
  7248. +}
  7249. +
  7250. +static void
  7251. +fec_enet_tx_unmap_range(unsigned index, unsigned last, struct fec_enet_private *fep)
  7252. +{
  7253. + union bufdesc_u *bdp;
  7254. +
  7255. + do {
  7256. + if (last == 0)
  7257. + last = fep->tx_ring_size;
  7258. + last--;
  7259. +
  7260. + bdp = fec_enet_tx_get(last, fep);
  7261. + fec_enet_tx_unmap(last, bdp, fep);
  7262. + } while (index != last);
  7263. +}
  7264. +
  7265. +static unsigned ring_free(unsigned ins, unsigned rem, unsigned size)
  7266. +{
  7267. + int num = rem - ins;
  7268. + return num < 0 ? num + size : num;
  7269. +}
  7270. +
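A standalone illustration (not part of the patch) of the ring_free() arithmetic above: the free space between the insertion index (tx_next) and the removal index (tx_dirty) is a modular difference over the ring size.

#include <stdio.h>

static unsigned example_ring_free(unsigned ins, unsigned rem, unsigned size)
{
	/* same expression as ring_free(): the unsigned subtraction wraps,
	 * and the conversion to int makes the wrapped value negative
	 */
	int num = rem - ins;
	return num < 0 ? num + size : num;
}

int main(void)
{
	/* size 8: ins=6, rem=2 -> -4 + 8 = 4 free; ins=2, rem=6 -> 4 free */
	printf("%u %u\n", example_ring_free(6, 2, 8),
			  example_ring_free(2, 6, 8));
	return 0;
}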
  7271. static netdev_tx_t
  7272. fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  7273. {
  7274. struct fec_enet_private *fep = netdev_priv(ndev);
  7275. - const struct platform_device_id *id_entry =
  7276. - platform_get_device_id(fep->pdev);
  7277. - struct bufdesc *bdp, *bdp_pre;
  7278. + union bufdesc_u *bdp;
  7279. void *bufaddr;
  7280. unsigned short status;
  7281. - unsigned int index;
  7282. + unsigned index, last, length, cbd_esc;
  7283. + int f, nr_frags = skb_shinfo(skb)->nr_frags;
  7284. + dma_addr_t addr;
  7285. /* Fill in a Tx ring entry */
  7286. - bdp = fep->cur_tx;
  7287. -
  7288. - status = bdp->cbd_sc;
  7289. + index = fep->tx_next;
  7290. - if (status & BD_ENET_TX_READY) {
  7291. + if (ring_free(index, fep->tx_dirty, fep->tx_ring_size) < 1 + nr_frags) {
  7292. /* Ooops. All transmit buffers are full. Bail out.
  7293. * This should not happen, since ndev->tbusy should be set.
  7294. */
  7295. @@ -342,26 +400,17 @@
  7296. return NETDEV_TX_OK;
  7297. }
  7298. - /* Clear all of the status flags */
  7299. - status &= ~BD_ENET_TX_STATS;
  7300. -
  7301. /* Set buffer length and buffer pointer */
  7302. bufaddr = skb->data;
  7303. - bdp->cbd_datlen = skb->len;
  7304. + length = skb_headlen(skb);
  7305. /*
  7306. * On some FEC implementations data must be aligned on
  7307. * 4-byte boundaries. Use bounce buffers to copy data
  7308. * and get it aligned. Ugh.
  7309. */
  7310. - if (fep->bufdesc_ex)
  7311. - index = (struct bufdesc_ex *)bdp -
  7312. - (struct bufdesc_ex *)fep->tx_bd_base;
  7313. - else
  7314. - index = bdp - fep->tx_bd_base;
  7315. -
  7316. if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
  7317. - memcpy(fep->tx_bounce[index], skb->data, skb->len);
  7318. + memcpy(fep->tx_bounce[index], skb->data, length);
  7319. bufaddr = fep->tx_bounce[index];
  7320. }
  7321. @@ -370,75 +419,127 @@
  7322. * the system that it's running on. As the result, driver has to
  7323. * swap every frame going to and coming from the controller.
  7324. */
  7325. - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
  7326. - swap_buffer(bufaddr, skb->len);
  7327. + if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  7328. + swap_buffer(bufaddr, length);
  7329. - /* Save skb pointer */
  7330. - fep->tx_skbuff[index] = skb;
  7331. + /* Push the data cache so the CPM does not get stale memory data. */
  7332. + addr = dma_map_single(&fep->pdev->dev, bufaddr, length, DMA_TO_DEVICE);
  7333. + if (dma_mapping_error(&fep->pdev->dev, addr))
  7334. + goto release;
  7335. +
  7336. + bdp = fec_enet_tx_get(index, fep);
  7337. + bdp->bd.cbd_datlen = length;
  7338. + bdp->bd.cbd_bufaddr = addr;
  7339. - /* Push the data cache so the CPM does not get stale memory
  7340. - * data.
  7341. - */
  7342. - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
  7343. - skb->len, DMA_TO_DEVICE);
  7344. - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
  7345. - bdp->cbd_bufaddr = 0;
  7346. - fep->tx_skbuff[index] = NULL;
  7347. - dev_kfree_skb_any(skb);
  7348. - if (net_ratelimit())
  7349. - netdev_err(ndev, "Tx DMA memory map failed\n");
  7350. - return NETDEV_TX_OK;
  7351. - }
  7352. + fep->tx_page_map[index] = 0;
  7353. - if (fep->bufdesc_ex) {
  7354. -
  7355. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  7356. - ebdp->cbd_bdu = 0;
  7357. + cbd_esc = BD_ENET_TX_INT;
  7358. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  7359. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
  7360. fep->hwts_tx_en)) {
  7361. - ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
  7362. + cbd_esc |= BD_ENET_TX_TS;
  7363. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  7364. } else {
  7365. - ebdp->cbd_esc = BD_ENET_TX_INT;
  7366. -
  7367. /* Enable protocol checksum flags
  7368. * We do not bother with the IP Checksum bits as they
  7369. * are done by the kernel
  7370. */
  7371. if (skb->ip_summed == CHECKSUM_PARTIAL)
  7372. - ebdp->cbd_esc |= BD_ENET_TX_PINS;
  7373. + cbd_esc |= BD_ENET_TX_PINS;
  7374. + }
  7375. + bdp->ebd.cbd_bdu = 0;
  7376. + bdp->ebd.cbd_esc = cbd_esc;
  7377. + }
  7378. +
  7379. + for (last = index, f = 0; f < nr_frags; f++) {
  7380. + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
  7381. +
  7382. + if (++last >= fep->tx_ring_size)
  7383. + last = 0;
  7384. +
  7385. + length = skb_frag_size(frag);
  7386. +
  7387. + /* If the alignment is unsuitable, we need to bounce. */
  7388. + if (frag->page_offset & FEC_ALIGNMENT) {
  7389. + unsigned char *bounce = fep->tx_bounce[last];
  7390. +
  7391. + /* FIXME: highdma? */
  7392. + memcpy(bounce, skb_frag_address(frag), length);
  7393. +
  7394. + addr = dma_map_single(&fep->pdev->dev, bounce,
  7395. + length, DMA_TO_DEVICE);
  7396. + fep->tx_page_map[last] = 0;
  7397. + } else {
  7398. + addr = skb_frag_dma_map(&fep->pdev->dev, frag, 0,
  7399. + length, DMA_TO_DEVICE);
  7400. + fep->tx_page_map[last] = 1;
  7401. + }
  7402. +
  7403. + if (dma_mapping_error(&fep->pdev->dev, addr))
  7404. + goto release_frags;
  7405. +
  7406. + bdp = fec_enet_tx_get(last, fep);
  7407. + bdp->bd.cbd_datlen = length;
  7408. + bdp->bd.cbd_bufaddr = addr;
  7409. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  7410. + bdp->ebd.cbd_esc = cbd_esc;
  7411. + bdp->ebd.cbd_bdu = 0;
  7412. }
  7413. }
  7414. + /* Save skb pointer */
  7415. + fep->tx_skbuff[last] = skb;
  7416. +
  7417. + /*
  7418. + * We need the preceding stores to the descriptor to complete
  7419. + * before updating the status field, which hands it over to the
  7420. + * hardware. The corresponding rmb() is "in the hardware".
  7421. + */
  7422. + wmb();
  7423. +
  7424. /* Send it on its way. Tell FEC it's ready, interrupt when done,
  7425. * it's the last BD of the frame, and to put the CRC on the end.
  7426. */
  7427. - status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
  7428. - | BD_ENET_TX_LAST | BD_ENET_TX_TC);
  7429. - bdp->cbd_sc = status;
  7430. -
  7431. - bdp_pre = fec_enet_get_prevdesc(bdp, fep);
  7432. - if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
  7433. - !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
  7434. - fep->delay_work.trig_tx = true;
  7435. - schedule_delayed_work(&(fep->delay_work.delay_work),
  7436. - msecs_to_jiffies(1));
  7437. + status = bdp->bd.cbd_sc & BD_ENET_TX_WRAP;
  7438. + bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_INTR |
  7439. + BD_ENET_TX_LAST | BD_ENET_TX_TC;
  7440. +
  7441. + /* Now walk backwards setting the TX_READY on each fragment */
  7442. + for (f = nr_frags - 1; f >= 0; f--) {
  7443. + unsigned i = index + f;
  7444. +
  7445. + if (i >= fep->tx_ring_size)
  7446. + i -= fep->tx_ring_size;
  7447. +
  7448. + bdp = fec_enet_tx_get(i, fep);
  7449. + status = bdp->bd.cbd_sc & BD_ENET_TX_WRAP;
  7450. + bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_INTR;
  7451. }
  7452. - /* If this was the last BD in the ring, start at the beginning again. */
  7453. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7454. -
  7455. skb_tx_timestamp(skb);
  7456. + netdev_sent_queue(ndev, skb->len);
  7457. +
  7458. + if (++last >= fep->tx_ring_size)
  7459. + last = 0;
  7460. - fep->cur_tx = bdp;
  7461. + fep->tx_next = last;
  7462. - if (fep->cur_tx == fep->dirty_tx)
  7463. + if (ring_free(last, fep->tx_dirty, fep->tx_ring_size) < fep->tx_min)
  7464. netif_stop_queue(ndev);
  7465. /* Trigger transmission start */
  7466. - writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7467. + if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
  7468. + writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7469. return NETDEV_TX_OK;
  7470. +
  7471. + release_frags:
  7472. + fec_enet_tx_unmap_range(index, last, fep);
  7473. + release:
  7474. + dev_kfree_skb_any(skb);
  7475. + if (net_ratelimit())
  7476. + netdev_err(ndev, "Tx DMA memory map failed\n");
  7477. + return NETDEV_TX_OK;
  7478. }
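A sketch (not part of the patch; the types and names are illustrative) of the descriptor hand-over pattern used above: fill every descriptor of the frame, issue the wmb(), then set TX_READY walking backwards so the head descriptor, which the hardware fetches first, becomes ready last and a half-built frame is never visible:

#define EX_TX_READY	0x8000

struct ex_bd {
	unsigned short sc;	/* status/control word */
	/* address and length words omitted for brevity */
};

static void ex_hand_over(struct ex_bd *ring, unsigned head, unsigned last,
			 unsigned ring_size)
{
	unsigned i = last;

	/* in the driver, a wmb() precedes this loop */
	for (;;) {
		ring[i].sc |= EX_TX_READY;
		if (i == head)
			break;
		i = i ? i - 1 : ring_size - 1;
	}
}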
  7479. /* Init RX & TX buffer descriptors
  7480. @@ -446,71 +547,60 @@
  7481. static void fec_enet_bd_init(struct net_device *dev)
  7482. {
  7483. struct fec_enet_private *fep = netdev_priv(dev);
  7484. - struct bufdesc *bdp;
  7485. + union bufdesc_u *bdp;
  7486. unsigned int i;
  7487. /* Initialize the receive buffer descriptors. */
  7488. - bdp = fep->rx_bd_base;
  7489. for (i = 0; i < fep->rx_ring_size; i++) {
  7490. + bdp = fec_enet_rx_get(i, fep);
  7491. /* Initialize the BD for every fragment in the page. */
  7492. - if (bdp->cbd_bufaddr)
  7493. - bdp->cbd_sc = BD_ENET_RX_EMPTY;
  7494. + if (bdp->bd.cbd_bufaddr)
  7495. + bdp->bd.cbd_sc = BD_ENET_RX_EMPTY;
  7496. else
  7497. - bdp->cbd_sc = 0;
  7498. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7499. - }
  7500. + bdp->bd.cbd_sc = 0;
  7501. - /* Set the last buffer to wrap */
  7502. - bdp = fec_enet_get_prevdesc(bdp, fep);
  7503. - bdp->cbd_sc |= BD_SC_WRAP;
  7504. + if (i == fep->rx_ring_size - 1)
  7505. + bdp->bd.cbd_sc |= BD_SC_WRAP;
  7506. + }
  7507. - fep->cur_rx = fep->rx_bd_base;
  7508. + fep->rx_next = 0;
  7509. /* ...and the same for transmit */
  7510. - bdp = fep->tx_bd_base;
  7511. - fep->cur_tx = bdp;
  7512. for (i = 0; i < fep->tx_ring_size; i++) {
  7513. + bdp = fec_enet_tx_get(i, fep);
  7514. /* Initialize the BD for every fragment in the page. */
  7515. - bdp->cbd_sc = 0;
  7516. - if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
  7517. + if (i == fep->tx_ring_size - 1)
  7518. + bdp->bd.cbd_sc = BD_SC_WRAP;
  7519. + else
  7520. + bdp->bd.cbd_sc = 0;
  7521. + if (bdp->bd.cbd_bufaddr)
  7522. + fec_enet_tx_unmap(i, bdp, fep);
  7523. + if (fep->tx_skbuff[i]) {
  7524. dev_kfree_skb_any(fep->tx_skbuff[i]);
  7525. fep->tx_skbuff[i] = NULL;
  7526. }
  7527. - bdp->cbd_bufaddr = 0;
  7528. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7529. }
  7530. - /* Set the last buffer to wrap */
  7531. - bdp = fec_enet_get_prevdesc(bdp, fep);
  7532. - bdp->cbd_sc |= BD_SC_WRAP;
  7533. - fep->dirty_tx = bdp;
  7534. + fep->tx_next = 0;
  7535. + fep->tx_dirty = fep->tx_ring_size - 1;
  7536. }
  7537. -/* This function is called to start or restart the FEC during a link
  7538. - * change. This only happens when switching between half and full
  7539. - * duplex.
  7540. +/*
  7541. + * This function is called to start or restart the FEC during a link
  7542. + * change, transmit timeout, or to reconfigure the FEC. The network
  7543. + * packet processing for this device must be stopped before this call.
  7544. */
  7545. static void
  7546. -fec_restart(struct net_device *ndev, int duplex)
  7547. +fec_restart(struct net_device *ndev)
  7548. {
  7549. struct fec_enet_private *fep = netdev_priv(ndev);
  7550. - const struct platform_device_id *id_entry =
  7551. - platform_get_device_id(fep->pdev);
  7552. - int i;
  7553. u32 val;
  7554. u32 temp_mac[2];
  7555. u32 rcntl = OPT_FRAME_SIZE | 0x04;
  7556. u32 ecntl = 0x2; /* ETHEREN */
  7557. - if (netif_running(ndev)) {
  7558. - netif_device_detach(ndev);
  7559. - napi_disable(&fep->napi);
  7560. - netif_stop_queue(ndev);
  7561. - netif_tx_lock_bh(ndev);
  7562. - }
  7563. -
  7564. /* Whack a reset. We should wait for this. */
  7565. writel(1, fep->hwp + FEC_ECNTRL);
  7566. udelay(10);
  7567. @@ -519,7 +609,7 @@
  7568. * enet-mac reset will reset mac address registers too,
  7569. * so need to reconfigure it.
  7570. */
  7571. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7572. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7573. memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
  7574. writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
  7575. writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
  7576. @@ -531,27 +621,16 @@
  7577. /* Set maximum receive buffer size. */
  7578. writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
  7579. - fec_enet_bd_init(ndev);
  7580. + if (fep->rx_bd_base)
  7581. + fec_enet_bd_init(ndev);
  7582. + netdev_reset_queue(ndev);
  7583. /* Set receive and transmit descriptor base. */
  7584. - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
  7585. - if (fep->bufdesc_ex)
  7586. - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
  7587. - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
  7588. - else
  7589. - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
  7590. - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
  7591. -
  7592. -
  7593. - for (i = 0; i <= TX_RING_MOD_MASK; i++) {
  7594. - if (fep->tx_skbuff[i]) {
  7595. - dev_kfree_skb_any(fep->tx_skbuff[i]);
  7596. - fep->tx_skbuff[i] = NULL;
  7597. - }
  7598. - }
  7599. + writel(fep->rx_bd_dma, fep->hwp + FEC_R_DES_START);
  7600. + writel(fep->tx_bd_dma, fep->hwp + FEC_X_DES_START);
  7601. /* Enable MII mode */
  7602. - if (duplex) {
  7603. + if (fep->full_duplex == DUPLEX_FULL) {
  7604. /* FD enable */
  7605. writel(0x04, fep->hwp + FEC_X_CNTRL);
  7606. } else {
  7607. @@ -560,15 +639,15 @@
  7608. writel(0x0, fep->hwp + FEC_X_CNTRL);
  7609. }
  7610. - fep->full_duplex = duplex;
  7611. -
  7612. /* Set MII speed */
  7613. writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
  7614. #if !defined(CONFIG_M5272)
  7615. /* set RX checksum */
  7616. val = readl(fep->hwp + FEC_RACC);
  7617. - if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
  7618. + if (fep->quirks & FEC_QUIRK_RX_SHIFT16)
  7619. + val |= FEC_RACC_SHIFT16;
  7620. + if (fep->flags & FEC_FLAG_RX_CSUM)
  7621. val |= FEC_RACC_OPTIONS;
  7622. else
  7623. val &= ~FEC_RACC_OPTIONS;
  7624. @@ -579,9 +658,9 @@
  7625. * The phy interface and speed need to get configured
  7626. * differently on enet-mac.
  7627. */
  7628. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7629. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7630. /* Enable flow control and length check */
  7631. - rcntl |= 0x40000000 | 0x00000020;
  7632. + rcntl |= 0x40000000;
  7633. /* RGMII, RMII or MII */
  7634. if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
  7635. @@ -602,7 +681,7 @@
  7636. }
  7637. } else {
  7638. #ifdef FEC_MIIGSK_ENR
  7639. - if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
  7640. + if (fep->quirks & FEC_QUIRK_USE_GASKET) {
  7641. u32 cfgr;
  7642. /* disable the gasket and wait */
  7643. writel(0, fep->hwp + FEC_MIIGSK_ENR);
  7644. @@ -627,22 +706,24 @@
  7645. }
  7646. #if !defined(CONFIG_M5272)
  7647. - /* enable pause frame*/
  7648. - if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
  7649. - ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
  7650. - fep->phy_dev && fep->phy_dev->pause)) {
  7651. - rcntl |= FEC_ENET_FCE;
  7652. -
  7653. - /* set FIFO threshold parameter to reduce overrun */
  7654. - writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
  7655. - writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
  7656. - writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
  7657. - writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
  7658. + if (fep->full_duplex == DUPLEX_FULL) {
  7659. + /*
  7660. + * Configure pause modes according to the current status.
  7661. + * Must only be enabled for full duplex links.
  7662. + */
  7663. + if (fep->pause_mode & FEC_PAUSE_FLAG_RX)
  7664. + rcntl |= FEC_ENET_FCE;
  7665. - /* OPD */
  7666. - writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
  7667. - } else {
  7668. - rcntl &= ~FEC_ENET_FCE;
  7669. + if (fep->pause_mode & FEC_PAUSE_FLAG_TX) {
  7670. + /* set FIFO threshold parameter to reduce overrun */
  7671. + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
  7672. + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
  7673. + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
  7674. + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
  7675. +
  7676. + /* OPD */
  7677. + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
  7678. + }
  7679. }
  7680. #endif /* !defined(CONFIG_M5272) */
  7681. @@ -655,14 +736,14 @@
  7682. writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
  7683. #endif
  7684. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7685. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7686. /* enable ENET endian swap */
  7687. ecntl |= (1 << 8);
  7688. /* enable ENET store and forward mode */
  7689. writel(1 << 8, fep->hwp + FEC_X_WMRK);
  7690. }
  7691. - if (fep->bufdesc_ex)
  7692. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7693. ecntl |= (1 << 4);
  7694. #ifndef CONFIG_M5272
  7695. @@ -674,26 +755,17 @@
  7696. writel(ecntl, fep->hwp + FEC_ECNTRL);
  7697. writel(0, fep->hwp + FEC_R_DES_ACTIVE);
  7698. - if (fep->bufdesc_ex)
  7699. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  7700. fec_ptp_start_cyclecounter(ndev);
  7701. /* Enable interrupts we wish to service */
  7702. writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
  7703. -
  7704. - if (netif_running(ndev)) {
  7705. - netif_tx_unlock_bh(ndev);
  7706. - netif_wake_queue(ndev);
  7707. - napi_enable(&fep->napi);
  7708. - netif_device_attach(ndev);
  7709. - }
  7710. }
  7711. static void
  7712. fec_stop(struct net_device *ndev)
  7713. {
  7714. struct fec_enet_private *fep = netdev_priv(ndev);
  7715. - const struct platform_device_id *id_entry =
  7716. - platform_get_device_id(fep->pdev);
  7717. u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
  7718. /* We cannot expect a graceful transmit stop without link !!! */
  7719. @@ -711,7 +783,7 @@
  7720. writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
  7721. /* We have to keep ENET enabled to have MII interrupt stay working */
  7722. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
  7723. + if (fep->quirks & FEC_QUIRK_ENET_MAC) {
  7724. writel(2, fep->hwp + FEC_ECNTRL);
  7725. writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
  7726. }
  7727. @@ -723,127 +795,312 @@
  7728. {
  7729. struct fec_enet_private *fep = netdev_priv(ndev);
  7730. + fec_dump(ndev);
  7731. +
  7732. ndev->stats.tx_errors++;
  7733. - fep->delay_work.timeout = true;
  7734. - schedule_delayed_work(&(fep->delay_work.delay_work), 0);
  7735. + schedule_work(&fep->tx_timeout_work);
  7736. }
  7737. -static void fec_enet_work(struct work_struct *work)
  7738. +static void fec_enet_timeout_work(struct work_struct *work)
  7739. {
  7740. struct fec_enet_private *fep =
  7741. - container_of(work,
  7742. - struct fec_enet_private,
  7743. - delay_work.delay_work.work);
  7744. -
  7745. - if (fep->delay_work.timeout) {
  7746. - fep->delay_work.timeout = false;
  7747. - fec_restart(fep->netdev, fep->full_duplex);
  7748. - netif_wake_queue(fep->netdev);
  7749. - }
  7750. + container_of(work, struct fec_enet_private, tx_timeout_work);
  7751. + struct net_device *ndev = fep->netdev;
  7752. - if (fep->delay_work.trig_tx) {
  7753. - fep->delay_work.trig_tx = false;
  7754. - writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7755. + rtnl_lock();
  7756. + if (netif_device_present(ndev) || netif_running(ndev)) {
  7757. + mutex_lock(&fep->mutex);
  7758. + napi_disable(&fep->napi);
  7759. + netif_tx_lock_bh(ndev);
  7760. + fec_restart(ndev);
  7761. + netif_wake_queue(ndev);
  7762. + netif_tx_unlock_bh(ndev);
  7763. + napi_enable(&fep->napi);
  7764. + mutex_unlock(&fep->mutex);
  7765. }
  7766. + rtnl_unlock();
  7767. }
  7768. static void
  7769. +fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
  7770. + struct skb_shared_hwtstamps *hwtstamps)
  7771. +{
  7772. + unsigned long flags;
  7773. + u64 ns;
  7774. +
  7775. + spin_lock_irqsave(&fep->tmreg_lock, flags);
  7776. + ns = timecounter_cyc2time(&fep->tc, ts);
  7777. + spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  7778. +
  7779. + memset(hwtstamps, 0, sizeof(*hwtstamps));
  7780. + hwtstamps->hwtstamp = ns_to_ktime(ns);
  7781. +}
  7782. +
  7783. +static void noinline
  7784. fec_enet_tx(struct net_device *ndev)
  7785. {
  7786. - struct fec_enet_private *fep;
  7787. - struct bufdesc *bdp;
  7788. - unsigned short status;
  7789. + struct fec_enet_private *fep = netdev_priv(ndev);
  7790. + union bufdesc_u *bdp;
  7791. struct sk_buff *skb;
  7792. - int index = 0;
  7793. -
  7794. - fep = netdev_priv(ndev);
  7795. - bdp = fep->dirty_tx;
  7796. + unsigned index = fep->tx_dirty;
  7797. + unsigned pkts_compl, bytes_compl;
  7798. - /* get next bdp of dirty_tx */
  7799. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7800. + pkts_compl = bytes_compl = 0;
  7801. + do {
  7802. + unsigned status, cbd_esc;
  7803. - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
  7804. + if (++index >= fep->tx_ring_size)
  7805. + index = 0;
  7806. /* current queue is empty */
  7807. - if (bdp == fep->cur_tx)
  7808. + if (index == fep->tx_next)
  7809. break;
  7810. - if (fep->bufdesc_ex)
  7811. - index = (struct bufdesc_ex *)bdp -
  7812. - (struct bufdesc_ex *)fep->tx_bd_base;
  7813. - else
  7814. - index = bdp - fep->tx_bd_base;
  7815. + bdp = fec_enet_tx_get(index, fep);
  7816. +
  7817. + status = bdp->bd.cbd_sc;
  7818. + if (status & BD_ENET_TX_READY)
  7819. + break;
  7820. +
  7821. + fec_enet_tx_unmap(index, bdp, fep);
  7822. skb = fep->tx_skbuff[index];
  7823. - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
  7824. - DMA_TO_DEVICE);
  7825. - bdp->cbd_bufaddr = 0;
  7826. + fep->tx_skbuff[index] = NULL;
  7827. /* Check for errors. */
  7828. - if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
  7829. - BD_ENET_TX_RL | BD_ENET_TX_UN |
  7830. - BD_ENET_TX_CSL)) {
  7831. - ndev->stats.tx_errors++;
  7832. - if (status & BD_ENET_TX_HB) /* No heartbeat */
  7833. - ndev->stats.tx_heartbeat_errors++;
  7834. - if (status & BD_ENET_TX_LC) /* Late collision */
  7835. - ndev->stats.tx_window_errors++;
  7836. - if (status & BD_ENET_TX_RL) /* Retrans limit */
  7837. - ndev->stats.tx_aborted_errors++;
  7838. - if (status & BD_ENET_TX_UN) /* Underrun */
  7839. - ndev->stats.tx_fifo_errors++;
  7840. - if (status & BD_ENET_TX_CSL) /* Carrier lost */
  7841. - ndev->stats.tx_carrier_errors++;
  7842. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  7843. + cbd_esc = bdp->ebd.cbd_esc;
  7844. + if (cbd_esc & BD_ENET_TX_TXE) {
  7845. + ndev->stats.tx_errors++;
  7846. + if (cbd_esc & BD_ENET_TX_EE) { /* excess collision */
  7847. + ndev->stats.collisions += 16;
  7848. + ndev->stats.tx_aborted_errors++;
  7849. + }
  7850. + if (cbd_esc & BD_ENET_TX_LCE) /* late collision error */
  7851. + ndev->stats.tx_window_errors++;
  7852. + if (cbd_esc & (BD_ENET_TX_UE | BD_ENET_TX_FE | BD_ENET_TX_OE))
  7853. + ndev->stats.tx_fifo_errors++;
  7854. + goto next;
  7855. + }
  7856. } else {
  7857. - ndev->stats.tx_packets++;
  7858. - ndev->stats.tx_bytes += bdp->cbd_datlen;
  7859. + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
  7860. + BD_ENET_TX_RL | BD_ENET_TX_UN |
  7861. + BD_ENET_TX_CSL)) {
  7862. + ndev->stats.tx_errors++;
  7863. + if (status & BD_ENET_TX_HB) /* No heartbeat */
  7864. + ndev->stats.tx_heartbeat_errors++;
  7865. + if (status & BD_ENET_TX_LC) /* Late collision */
  7866. + ndev->stats.tx_window_errors++;
  7867. + if (status & BD_ENET_TX_RL) /* Retrans limit */
  7868. + ndev->stats.tx_aborted_errors++;
  7869. + if (status & BD_ENET_TX_UN) /* Underrun */
  7870. + ndev->stats.tx_fifo_errors++;
  7871. + if (status & BD_ENET_TX_CSL) /* Carrier lost */
  7872. + ndev->stats.tx_carrier_errors++;
  7873. + goto next;
  7874. + }
  7875. }
  7876. - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
  7877. - fep->bufdesc_ex) {
  7878. - struct skb_shared_hwtstamps shhwtstamps;
  7879. - unsigned long flags;
  7880. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  7881. -
  7882. - memset(&shhwtstamps, 0, sizeof(shhwtstamps));
  7883. - spin_lock_irqsave(&fep->tmreg_lock, flags);
  7884. - shhwtstamps.hwtstamp = ns_to_ktime(
  7885. - timecounter_cyc2time(&fep->tc, ebdp->ts));
  7886. - spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  7887. - skb_tstamp_tx(skb, &shhwtstamps);
  7888. + if (skb) {
  7889. + ndev->stats.tx_packets++;
  7890. + ndev->stats.tx_bytes += skb->len;
  7891. }
  7892. - if (status & BD_ENET_TX_READY)
  7893. - netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
  7894. -
  7895. /* Deferred means some collisions occurred during transmit,
  7896. * but we eventually sent the packet OK.
  7897. */
  7898. if (status & BD_ENET_TX_DEF)
  7899. ndev->stats.collisions++;
  7900. + next:
  7901. + if (skb) {
  7902. + if (fep->flags & FEC_FLAG_BUFDESC_EX &&
  7903. + unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
  7904. + struct skb_shared_hwtstamps shhwtstamps;
  7905. - /* Free the sk buffer associated with this last transmit */
  7906. - dev_kfree_skb_any(skb);
  7907. - fep->tx_skbuff[index] = NULL;
  7908. + fec_enet_hwtstamp(fep, bdp->ebd.ts, &shhwtstamps);
  7909. + skb_tstamp_tx(skb, &shhwtstamps);
  7910. + }
  7911. - fep->dirty_tx = bdp;
  7912. + pkts_compl++;
  7913. + bytes_compl += skb->len;
  7914. - /* Update pointer to next buffer descriptor to be transmitted */
  7915. - bdp = fec_enet_get_nextdesc(bdp, fep);
  7916. + /* Free the sk buffer associated with this last transmit */
  7917. + dev_kfree_skb_any(skb);
  7918. + }
  7919. - /* Since we have freed up a buffer, the ring is no longer full
  7920. - */
  7921. - if (fep->dirty_tx != fep->cur_tx) {
  7922. - if (netif_queue_stopped(ndev))
  7923. - netif_wake_queue(ndev);
  7924. + fep->tx_dirty = index;
  7925. + } while (1);
  7926. +
  7927. + netdev_completed_queue(ndev, pkts_compl, bytes_compl);
  7928. +
  7929. + /* ERR006538: Keep the transmitter going */
  7930. + if (index != fep->tx_next && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
  7931. + writel(0, fep->hwp + FEC_X_DES_ACTIVE);
  7932. +
  7933. + if (netif_queue_stopped(ndev) &&
  7934. + ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) >=
  7935. + fep->tx_min)
  7936. + netif_wake_queue(ndev);
  7937. +}
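The netdev_sent_queue()/netdev_completed_queue() calls added above follow the standard byte queue limits (BQL) pattern; a minimal sketch of how the three calls pair up (the ex_* wrappers are illustrative, the netdev_* APIs are real):

#include <linux/netdevice.h>

static void ex_on_xmit(struct net_device *ndev, unsigned int len)
{
	netdev_sent_queue(ndev, len);		/* bytes queued on the ring */
}

static void ex_on_tx_complete(struct net_device *ndev,
			      unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(ndev, pkts, bytes); /* bytes actually sent */
}

static void ex_on_ring_reinit(struct net_device *ndev)
{
	netdev_reset_queue(ndev);	/* as fec_restart() does above */
}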
  7938. +
  7939. +
  7940. +static void
  7941. +fec_enet_receive(struct sk_buff *skb, union bufdesc_u *bdp, struct net_device *ndev)
  7942. +{
  7943. + struct fec_enet_private *fep = netdev_priv(ndev);
  7944. +
  7945. + skb->protocol = eth_type_trans(skb, ndev);
  7946. +
  7947. + /* Get receive timestamp from the skb */
  7948. + if (fep->hwts_rx_en && fep->flags & FEC_FLAG_BUFDESC_EX)
  7949. + fec_enet_hwtstamp(fep, bdp->ebd.ts, skb_hwtstamps(skb));
  7950. +
  7951. + if (fep->flags & FEC_FLAG_RX_CSUM) {
  7952. + if (!(bdp->ebd.cbd_esc & FLAG_RX_CSUM_ERROR)) {
  7953. + /* don't check it */
  7954. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  7955. + } else {
  7956. + skb_checksum_none_assert(skb);
  7957. }
  7958. }
  7959. - return;
  7960. +
  7961. + napi_gro_receive(&fep->napi, skb);
  7962. +}
  7963. +
  7964. +static void
  7965. +fec_enet_receive_copy(unsigned pkt_len, unsigned index, union bufdesc_u *bdp, struct net_device *ndev)
  7966. +{
  7967. + struct fec_enet_private *fep = netdev_priv(ndev);
  7968. + struct sk_buff *skb;
  7969. + unsigned char *data;
  7970. + bool vlan_packet_rcvd = false;
  7971. +
  7972. + /*
  7973. + * Detect the presence of the VLAN tag, and adjust
  7974. + * the packet length appropriately.
  7975. + */
  7976. + if (fep->flags & FEC_FLAG_RX_VLAN &&
  7977. + bdp->ebd.cbd_esc & BD_ENET_RX_VLAN) {
  7978. + pkt_len -= VLAN_HLEN;
  7979. + vlan_packet_rcvd = true;
  7980. + }
  7981. +
  7982. + /* This does 16 byte alignment, exactly what we need. */
  7983. + skb = netdev_alloc_skb(ndev, pkt_len + NET_IP_ALIGN);
  7984. + if (unlikely(!skb)) {
  7985. + ndev->stats.rx_dropped++;
  7986. + return;
  7987. + }
  7988. +
  7989. + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  7990. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  7991. +
  7992. + data = fep->rx_skbuff[index]->data;
  7993. +
  7994. +#ifndef CONFIG_M5272
  7995. + /*
  7996. + * If we have enabled this feature, we need to discard
  7997. + * the two bytes at the beginning of the packet before
  7998. + * copying it.
  7999. + */
  8000. + if (fep->quirks & FEC_QUIRK_RX_SHIFT16) {
  8001. + pkt_len -= 2;
  8002. + data += 2;
  8003. + }
  8004. +#endif
  8005. +
  8006. + if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  8007. + swap_buffer(data, pkt_len);
  8008. +
  8009. + skb_reserve(skb, NET_IP_ALIGN);
  8010. + skb_put(skb, pkt_len); /* Make room */
  8011. +
  8012. + /* If this is a VLAN packet remove the VLAN Tag */
  8013. + if (vlan_packet_rcvd) {
  8014. + struct vlan_hdr *vlan = (struct vlan_hdr *)(data + ETH_HLEN);
  8015. +
  8016. + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  8017. + ntohs(vlan->h_vlan_TCI));
  8018. +
  8019. + /* Extract the frame data without the VLAN header. */
  8020. + skb_copy_to_linear_data(skb, data, 2 * ETH_ALEN);
  8021. + skb_copy_to_linear_data_offset(skb, 2 * ETH_ALEN,
  8022. + data + 2 * ETH_ALEN + VLAN_HLEN,
  8023. + pkt_len - 2 * ETH_ALEN);
  8024. + } else {
  8025. + skb_copy_to_linear_data(skb, data, pkt_len);
  8026. + }
  8027. +
  8028. + dma_sync_single_for_device(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8029. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8030. +
  8031. + fec_enet_receive(skb, bdp, ndev);
  8032. }
  8033. +static void
  8034. +fec_enet_receive_nocopy(unsigned pkt_len, unsigned index, union bufdesc_u *bdp,
  8035. + struct net_device *ndev)
  8036. +{
  8037. + struct fec_enet_private *fep = netdev_priv(ndev);
  8038. + struct sk_buff *skb, *skb_new;
  8039. + unsigned char *data;
  8040. + dma_addr_t addr;
  8041. +
  8042. + skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
  8043. + if (!skb_new) {
  8044. + ndev->stats.rx_dropped++;
  8045. + return;
  8046. + }
  8047. +
  8048. + addr = dma_map_single(&fep->pdev->dev, skb_new->data,
  8049. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8050. + if (dma_mapping_error(&fep->pdev->dev, addr)) {
  8051. + dev_kfree_skb(skb_new);
  8052. + ndev->stats.rx_dropped++;
  8053. + return;
  8054. + }
  8055. -/* During a receive, the cur_rx points to the current incoming buffer.
  8056. + /* We have the new skb, so proceed to deal with the received data. */
  8057. + dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8058. + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8059. +
  8060. + skb = fep->rx_skbuff[index];
  8061. +
8062. + /* Now substitute in the new skb */
  8063. + fep->rx_skbuff[index] = skb_new;
  8064. + bdp->bd.cbd_bufaddr = addr;
  8065. +
  8066. + /*
  8067. + * Update the skb length according to the raw packet length.
  8068. + * Then remove the two bytes of additional padding.
  8069. + */
  8070. + skb_put(skb, pkt_len);
  8071. + data = skb_pull_inline(skb, 2);
  8072. +
  8073. + if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  8074. + swap_buffer(data, skb->len);
  8075. +
  8076. + /*
  8077. + * Now juggle things for the VLAN tag - if the hardware
  8078. + * flags this as present, we need to read the tag, and
  8079. + * then shuffle the ethernet addresses up.
  8080. + */
  8081. + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
  8082. + bdp->ebd.cbd_esc & BD_ENET_RX_VLAN) {
  8083. + struct vlan_hdr *vlan = (struct vlan_hdr *)(data + ETH_HLEN);
  8084. +
  8085. + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  8086. + ntohs(vlan->h_vlan_TCI));
  8087. +
  8088. + memmove(data + VLAN_HLEN, data, 2 * ETH_ALEN);
  8089. + skb_pull_inline(skb, VLAN_HLEN);
  8090. + }
  8091. +
  8092. + fec_enet_receive(skb, bdp, ndev);
  8093. +}
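A standalone illustration (not part of the patch) of the in-place VLAN strip above: the two MAC addresses are moved up over the 4-byte 802.1Q tag and the start of the frame advances by VLAN_HLEN.

#include <string.h>

#define EX_ETH_ALEN	6
#define EX_VLAN_HLEN	4

/* before: | dst | src | tpid tci | type | payload...
 * after :       | dst | src      | type | payload...
 */
static unsigned char *ex_strip_vlan(unsigned char *frame)
{
	memmove(frame + EX_VLAN_HLEN, frame, 2 * EX_ETH_ALEN);
	return frame + EX_VLAN_HLEN;	/* new start of the frame */
}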
  8094. +
  8095. +/* During a receive, the rx_next points to the current incoming buffer.
  8096. * When we update through the ring, if the next incoming buffer has
  8097. * not been given to the system, we just set the empty indicator,
  8098. * effectively tossing the packet.
  8099. @@ -852,18 +1109,9 @@
  8100. fec_enet_rx(struct net_device *ndev, int budget)
  8101. {
  8102. struct fec_enet_private *fep = netdev_priv(ndev);
  8103. - const struct platform_device_id *id_entry =
  8104. - platform_get_device_id(fep->pdev);
  8105. - struct bufdesc *bdp;
  8106. - unsigned short status;
  8107. - struct sk_buff *skb;
  8108. ushort pkt_len;
  8109. - __u8 *data;
  8110. int pkt_received = 0;
  8111. - struct bufdesc_ex *ebdp = NULL;
  8112. - bool vlan_packet_rcvd = false;
  8113. - u16 vlan_tag;
  8114. - int index = 0;
  8115. + unsigned index = fep->rx_next;
  8116. #ifdef CONFIG_M532x
  8117. flush_cache_all();
  8118. @@ -872,12 +1120,17 @@
  8119. /* First, grab all of the stats for the incoming packet.
  8120. * These get messed up if we get called due to a busy condition.
  8121. */
  8122. - bdp = fep->cur_rx;
  8123. + do {
  8124. + union bufdesc_u *bdp = fec_enet_rx_get(index, fep);
  8125. + unsigned status;
  8126. - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
  8127. + status = bdp->bd.cbd_sc;
  8128. + if (status & BD_ENET_RX_EMPTY)
  8129. + break;
  8130. if (pkt_received >= budget)
  8131. break;
  8132. +
  8133. pkt_received++;
  8134. /* Since we have allocated space to hold a complete frame,
  8135. @@ -886,155 +1139,81 @@
  8136. if ((status & BD_ENET_RX_LAST) == 0)
  8137. netdev_err(ndev, "rcv is not +last\n");
  8138. - if (!fep->opened)
  8139. - goto rx_processing_done;
  8140. + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
  8141. /* Check for errors. */
  8142. - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
  8143. - BD_ENET_RX_CR | BD_ENET_RX_OV)) {
  8144. + if (status & BD_ENET_RX_ERROR) {
  8145. ndev->stats.rx_errors++;
  8146. - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
  8147. - /* Frame too long or too short. */
  8148. - ndev->stats.rx_length_errors++;
  8149. - }
  8150. - if (status & BD_ENET_RX_NO) /* Frame alignment */
  8151. - ndev->stats.rx_frame_errors++;
  8152. - if (status & BD_ENET_RX_CR) /* CRC Error */
  8153. - ndev->stats.rx_crc_errors++;
  8154. - if (status & BD_ENET_RX_OV) /* FIFO overrun */
  8155. - ndev->stats.rx_fifo_errors++;
  8156. - }
  8157. - /* Report late collisions as a frame error.
  8158. - * On this error, the BD is closed, but we don't know what we
  8159. - * have in the buffer. So, just drop this frame on the floor.
  8160. - */
  8161. - if (status & BD_ENET_RX_CL) {
  8162. - ndev->stats.rx_errors++;
  8163. - ndev->stats.rx_frame_errors++;
  8164. + /*
  8165. + * Report late collisions as a frame error. On this
  8166. + * error, the BD is closed, but we don't know what we
  8167. + * have in the buffer. So, just drop this frame on
  8168. + * the floor.
  8169. + */
  8170. + if (status & BD_ENET_RX_CL) {
  8171. + ndev->stats.rx_frame_errors++;
  8172. + } else {
  8173. + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH))
  8174. + /* Frame too long or too short. */
  8175. + ndev->stats.rx_length_errors++;
  8176. + if (status & BD_ENET_RX_NO) /* Frame alignment */
  8177. + ndev->stats.rx_frame_errors++;
  8178. + if (status & BD_ENET_RX_CR) /* CRC Error */
  8179. + ndev->stats.rx_crc_errors++;
  8180. + if (status & BD_ENET_RX_OV) /* FIFO overrun */
  8181. + ndev->stats.rx_fifo_errors++;
  8182. + }
  8183. goto rx_processing_done;
  8184. }
  8185. /* Process the incoming frame. */
  8186. ndev->stats.rx_packets++;
  8187. - pkt_len = bdp->cbd_datlen;
  8188. - ndev->stats.rx_bytes += pkt_len;
  8189. -
  8190. - if (fep->bufdesc_ex)
  8191. - index = (struct bufdesc_ex *)bdp -
  8192. - (struct bufdesc_ex *)fep->rx_bd_base;
  8193. - else
  8194. - index = bdp - fep->rx_bd_base;
  8195. - data = fep->rx_skbuff[index]->data;
  8196. - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
  8197. - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8198. -
  8199. - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
  8200. - swap_buffer(data, pkt_len);
  8201. - /* Extract the enhanced buffer descriptor */
  8202. - ebdp = NULL;
  8203. - if (fep->bufdesc_ex)
  8204. - ebdp = (struct bufdesc_ex *)bdp;
  8205. -
  8206. - /* If this is a VLAN packet remove the VLAN Tag */
  8207. - vlan_packet_rcvd = false;
  8208. - if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  8209. - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
  8210. - /* Push and remove the vlan tag */
  8211. - struct vlan_hdr *vlan_header =
  8212. - (struct vlan_hdr *) (data + ETH_HLEN);
  8213. - vlan_tag = ntohs(vlan_header->h_vlan_TCI);
  8214. - pkt_len -= VLAN_HLEN;
  8215. -
  8216. - vlan_packet_rcvd = true;
  8217. - }
  8218. -
  8219. - /* This does 16 byte alignment, exactly what we need.
  8220. - * The packet length includes FCS, but we don't want to
  8221. - * include that when passing upstream as it messes up
  8222. - * bridging applications.
  8223. + /*
  8224. + * The packet length includes FCS, but we don't want
  8225. + * to include that when passing upstream as it messes
  8226. + * up bridging applications.
  8227. */
  8228. - skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
  8229. + pkt_len = bdp->bd.cbd_datlen - 4;
  8230. + ndev->stats.rx_bytes += pkt_len;
  8231. - if (unlikely(!skb)) {
  8232. - ndev->stats.rx_dropped++;
  8233. + if (fec_enet_rx_zerocopy(fep, pkt_len)) {
  8234. + fec_enet_receive_nocopy(pkt_len, index, bdp, ndev);
  8235. } else {
  8236. - int payload_offset = (2 * ETH_ALEN);
  8237. - skb_reserve(skb, NET_IP_ALIGN);
  8238. - skb_put(skb, pkt_len - 4); /* Make room */
  8239. -
  8240. - /* Extract the frame data without the VLAN header. */
  8241. - skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
  8242. - if (vlan_packet_rcvd)
  8243. - payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
  8244. - skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
  8245. - data + payload_offset,
  8246. - pkt_len - 4 - (2 * ETH_ALEN));
  8247. -
  8248. - skb->protocol = eth_type_trans(skb, ndev);
  8249. -
  8250. - /* Get receive timestamp from the skb */
  8251. - if (fep->hwts_rx_en && fep->bufdesc_ex) {
  8252. - struct skb_shared_hwtstamps *shhwtstamps =
  8253. - skb_hwtstamps(skb);
  8254. - unsigned long flags;
  8255. -
  8256. - memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  8257. -
  8258. - spin_lock_irqsave(&fep->tmreg_lock, flags);
  8259. - shhwtstamps->hwtstamp = ns_to_ktime(
  8260. - timecounter_cyc2time(&fep->tc, ebdp->ts));
  8261. - spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  8262. - }
  8263. -
  8264. - if (fep->bufdesc_ex &&
  8265. - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
  8266. - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
  8267. - /* don't check it */
  8268. - skb->ip_summed = CHECKSUM_UNNECESSARY;
  8269. - } else {
  8270. - skb_checksum_none_assert(skb);
  8271. - }
  8272. - }
  8273. -
  8274. - /* Handle received VLAN packets */
  8275. - if (vlan_packet_rcvd)
  8276. - __vlan_hwaccel_put_tag(skb,
  8277. - htons(ETH_P_8021Q),
  8278. - vlan_tag);
  8279. -
  8280. - napi_gro_receive(&fep->napi, skb);
  8281. + fec_enet_receive_copy(pkt_len, index, bdp, ndev);
  8282. }
  8283. - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
  8284. - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8285. rx_processing_done:
  8286. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  8287. + bdp->ebd.cbd_esc = BD_ENET_RX_INT;
  8288. + bdp->ebd.cbd_prot = 0;
  8289. + bdp->ebd.cbd_bdu = 0;
  8290. + }
  8291. +
  8292. + /*
  8293. + * Ensure that the previous writes have completed before
  8294. + * the status update becomes visible.
  8295. + */
  8296. + wmb();
  8297. +
  8298. /* Clear the status flags for this buffer */
  8299. status &= ~BD_ENET_RX_STATS;
  8300. /* Mark the buffer empty */
  8301. status |= BD_ENET_RX_EMPTY;
  8302. - bdp->cbd_sc = status;
  8303. -
  8304. - if (fep->bufdesc_ex) {
  8305. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  8306. -
  8307. - ebdp->cbd_esc = BD_ENET_RX_INT;
  8308. - ebdp->cbd_prot = 0;
  8309. - ebdp->cbd_bdu = 0;
  8310. - }
  8311. -
  8312. - /* Update BD pointer to next entry */
  8313. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8314. + bdp->bd.cbd_sc = status;
  8315. /* Doing this here will keep the FEC running while we process
  8316. * incoming frames. On a heavily loaded network, we should be
  8317. * able to keep up at the expense of system resources.
  8318. */
  8319. writel(0, fep->hwp + FEC_R_DES_ACTIVE);
  8320. - }
  8321. - fep->cur_rx = bdp;
  8322. +
  8323. + if (++index >= fep->rx_ring_size)
  8324. + index = 0;
  8325. + } while (1);
  8326. + fep->rx_next = index;
  8327. return pkt_received;
  8328. }
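fec_enet_rx_zerocopy() is referenced above but not defined in this hunk; the sketch below is only a guess at the kind of copybreak test it might perform (the threshold and name are hypothetical): copy small frames into a fresh skb, substitute the receive buffer for large ones.

#define EX_COPYBREAK	256	/* hypothetical threshold, not from the patch */

static int ex_rx_zerocopy(unsigned int pkt_len)
{
	return pkt_len >= EX_COPYBREAK;	/* nonzero: take the nocopy path */
}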
  8329. @@ -1044,29 +1223,25 @@
  8330. {
  8331. struct net_device *ndev = dev_id;
  8332. struct fec_enet_private *fep = netdev_priv(ndev);
  8333. + const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
  8334. uint int_events;
  8335. irqreturn_t ret = IRQ_NONE;
  8336. - do {
  8337. - int_events = readl(fep->hwp + FEC_IEVENT);
  8338. - writel(int_events, fep->hwp + FEC_IEVENT);
  8339. + int_events = readl(fep->hwp + FEC_IEVENT);
  8340. + writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
  8341. - if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
  8342. - ret = IRQ_HANDLED;
  8343. + if (int_events & napi_mask) {
  8344. + ret = IRQ_HANDLED;
  8345. - /* Disable the RX interrupt */
  8346. - if (napi_schedule_prep(&fep->napi)) {
  8347. - writel(FEC_RX_DISABLED_IMASK,
  8348. - fep->hwp + FEC_IMASK);
  8349. - __napi_schedule(&fep->napi);
  8350. - }
  8351. - }
  8352. + /* Disable the NAPI interrupts */
  8353. + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
  8354. + napi_schedule(&fep->napi);
  8355. + }
  8356. - if (int_events & FEC_ENET_MII) {
  8357. - ret = IRQ_HANDLED;
  8358. - complete(&fep->mdio_done);
  8359. - }
  8360. - } while (int_events);
  8361. + if (int_events & FEC_ENET_MII) {
  8362. + ret = IRQ_HANDLED;
  8363. + complete(&fep->mdio_done);
  8364. + }
  8365. return ret;
  8366. }
  8367. @@ -1074,8 +1249,16 @@
  8368. static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
  8369. {
  8370. struct net_device *ndev = napi->dev;
  8371. - int pkts = fec_enet_rx(ndev, budget);
  8372. struct fec_enet_private *fep = netdev_priv(ndev);
  8373. + int pkts;
  8374. +
  8375. + /*
  8376. + * Clear any pending transmit or receive interrupts before
  8377. + * processing the rings to avoid racing with the hardware.
  8378. + */
  8379. + writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
  8380. +
  8381. + pkts = fec_enet_rx(ndev, budget);
  8382. fec_enet_tx(ndev);
  8383. @@ -1173,26 +1356,78 @@
  8384. return;
  8385. }
  8386. - if (phy_dev->link) {
  8387. + /*
  8388. + * If the netdev is down, or is going down, we're not interested
  8389. + * in link state events, so just mark our idea of the link as down
  8390. + * and ignore the event.
  8391. + */
  8392. + if (!netif_running(ndev) || !netif_device_present(ndev)) {
  8393. + fep->link = 0;
  8394. + } else if (phy_dev->link) {
  8395. if (!fep->link) {
  8396. fep->link = phy_dev->link;
  8397. status_change = 1;
  8398. }
  8399. - if (fep->full_duplex != phy_dev->duplex)
  8400. + if (fep->full_duplex != phy_dev->duplex) {
  8401. + fep->full_duplex = phy_dev->duplex;
  8402. status_change = 1;
  8403. + }
  8404. if (phy_dev->speed != fep->speed) {
  8405. fep->speed = phy_dev->speed;
  8406. status_change = 1;
  8407. }
  8408. + if (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) {
  8409. + u32 lcl_adv = phy_dev->advertising;
  8410. + u32 rmt_adv = phy_dev->lp_advertising;
  8411. + unsigned mode = 0;
  8412. +
  8413. + if (lcl_adv & rmt_adv & ADVERTISED_Pause) {
  8414. + /*
  8415. + * Local Device Link Partner
  8416. + * Pause AsymDir Pause AsymDir Result
  8417. + * 1 X 1 X TX+RX
  8418. + */
  8419. + mode = FEC_PAUSE_FLAG_TX | FEC_PAUSE_FLAG_RX;
  8420. + } else if (lcl_adv & rmt_adv & ADVERTISED_Asym_Pause) {
  8421. + /*
  8422. + * 0 1 1 1 RX
  8423. + * 1 1 0 1 TX
  8424. + */
  8425. + if (rmt_adv & ADVERTISED_Pause)
  8426. + mode = FEC_PAUSE_FLAG_RX;
  8427. + else
  8428. + mode = FEC_PAUSE_FLAG_TX;
  8429. + }
  8430. +
  8431. + if (mode != fep->pause_mode) {
  8432. + fep->pause_mode = mode;
  8433. + status_change = 1;
  8434. + }
  8435. + }
  8436. +
  8437. /* if any of the above changed restart the FEC */
  8438. - if (status_change)
  8439. - fec_restart(ndev, phy_dev->duplex);
  8440. + if (status_change) {
  8441. + mutex_lock(&fep->mutex);
  8442. + napi_disable(&fep->napi);
  8443. + netif_tx_lock_bh(ndev);
  8444. + fec_restart(ndev);
  8445. + netif_wake_queue(ndev);
  8446. + netif_tx_unlock_bh(ndev);
  8447. + napi_enable(&fep->napi);
  8448. + mutex_unlock(&fep->mutex);
  8449. + }
  8450. } else {
  8451. if (fep->link) {
  8452. + mutex_lock(&fep->mutex);
  8453. + napi_disable(&fep->napi);
  8454. + netif_tx_lock_bh(ndev);
  8455. fec_stop(ndev);
  8456. + netif_tx_unlock_bh(ndev);
  8457. + napi_enable(&fep->napi);
  8458. + mutex_unlock(&fep->mutex);
  8459. fep->link = phy_dev->link;
  8460. status_change = 1;
  8461. }
  8462. @@ -1202,23 +1437,35 @@
  8463. phy_print_status(phy_dev);
  8464. }
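A standalone version (not part of the patch; bit values are illustrative) of the pause mode resolution above: symmetric pause on both sides gives TX+RX, otherwise a shared asymmetric capability resolves to RX or TX depending on whether the link partner also advertises symmetric pause.

#define EX_ADV_PAUSE	0x1
#define EX_ADV_ASYM	0x2
#define EX_FLAG_RX	0x1
#define EX_FLAG_TX	0x2

static unsigned ex_resolve_pause(unsigned lcl_adv, unsigned rmt_adv)
{
	if (lcl_adv & rmt_adv & EX_ADV_PAUSE)
		return EX_FLAG_TX | EX_FLAG_RX;
	if (lcl_adv & rmt_adv & EX_ADV_ASYM)
		return (rmt_adv & EX_ADV_PAUSE) ? EX_FLAG_RX : EX_FLAG_TX;
	return 0;
}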
  8465. -static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  8466. +static unsigned long fec_enet_mdio_op(struct fec_enet_private *fep,
  8467. + unsigned data)
  8468. {
  8469. - struct fec_enet_private *fep = bus->priv;
  8470. unsigned long time_left;
  8471. fep->mii_timeout = 0;
  8472. init_completion(&fep->mdio_done);
  8473. - /* start a read op */
  8474. - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
  8475. - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8476. - FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
  8477. + mutex_lock(&fep->mutex);
  8478. +
  8479. + /* start operation */
  8480. + writel(data, fep->hwp + FEC_MII_DATA);
  8481. /* wait for end of transfer */
  8482. time_left = wait_for_completion_timeout(&fep->mdio_done,
  8483. usecs_to_jiffies(FEC_MII_TIMEOUT));
  8484. - if (time_left == 0) {
  8485. +
  8486. + mutex_unlock(&fep->mutex);
  8487. +
  8488. + return time_left;
  8489. +}
  8490. +
  8491. +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
  8492. +{
  8493. + struct fec_enet_private *fep = bus->priv;
  8494. +
  8495. + if (fec_enet_mdio_op(fep, FEC_MMFR_ST | FEC_MMFR_OP_READ |
  8496. + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8497. + FEC_MMFR_TA) == 0) {
  8498. fep->mii_timeout = 1;
  8499. netdev_err(fep->netdev, "MDIO read timeout\n");
  8500. return -ETIMEDOUT;
  8501. @@ -1232,21 +1479,10 @@
  8502. u16 value)
  8503. {
  8504. struct fec_enet_private *fep = bus->priv;
  8505. - unsigned long time_left;
  8506. -
  8507. - fep->mii_timeout = 0;
  8508. - init_completion(&fep->mdio_done);
  8509. -
  8510. - /* start a write op */
  8511. - writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
  8512. - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8513. - FEC_MMFR_TA | FEC_MMFR_DATA(value),
  8514. - fep->hwp + FEC_MII_DATA);
  8515. - /* wait for end of transfer */
  8516. - time_left = wait_for_completion_timeout(&fep->mdio_done,
  8517. - usecs_to_jiffies(FEC_MII_TIMEOUT));
  8518. - if (time_left == 0) {
  8519. + if (fec_enet_mdio_op(fep, FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
  8520. + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
  8521. + FEC_MMFR_TA | FEC_MMFR_DATA(value)) == 0) {
  8522. fep->mii_timeout = 1;
  8523. netdev_err(fep->netdev, "MDIO write timeout\n");
  8524. return -ETIMEDOUT;
  8525. @@ -1255,11 +1491,37 @@
  8526. return 0;
  8527. }
  8528. +static void fec_enet_phy_config(struct net_device *ndev)
  8529. +{
  8530. +#ifndef CONFIG_M5272
  8531. + struct fec_enet_private *fep = netdev_priv(ndev);
  8532. + struct phy_device *phy = fep->phy_dev;
  8533. + unsigned pause = 0;
  8534. +
  8535. + /*
8536. + * Pause advertisement logic is weird. We don't advertise the raw
  8537. + * "can tx" and "can rx" modes, but instead it is whether we support
  8538. + * symmetric flow or asymmetric flow.
  8539. + *
  8540. + * Symmetric flow means we can only support both transmit and receive
  8541. + * flow control frames together. Asymmetric flow means we can
  8542. + * independently control each. Note that there is no bit encoding
  8543. + * for "I can only receive flow control frames."
  8544. + */
  8545. + if (fep->pause_flag & FEC_PAUSE_FLAG_RX)
  8546. + pause |= ADVERTISED_Asym_Pause | ADVERTISED_Pause;
  8547. + if (fep->pause_flag & FEC_PAUSE_FLAG_TX)
  8548. + pause |= ADVERTISED_Asym_Pause;
  8549. +
  8550. + pause &= phy->supported;
  8551. + pause |= phy->advertising & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
  8552. + phy->advertising = pause;
  8553. +#endif
  8554. +}
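A standalone version (not part of the patch; bit values are illustrative) of the mapping in fec_enet_phy_config() above: receive capability advertises symmetric plus asymmetric pause, transmit capability adds only the asymmetric bit, since 802.3 has no "receive only" encoding.

#define EX_CAP_RX	0x1
#define EX_CAP_TX	0x2
#define EX_ADV_PAUSE	0x1
#define EX_ADV_ASYM	0x2

static unsigned ex_pause_advert(unsigned pause_flag)
{
	unsigned adv = 0;

	if (pause_flag & EX_CAP_RX)
		adv |= EX_ADV_PAUSE | EX_ADV_ASYM;
	if (pause_flag & EX_CAP_TX)
		adv |= EX_ADV_ASYM;
	return adv;
}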
  8555. +
  8556. static int fec_enet_mii_probe(struct net_device *ndev)
  8557. {
  8558. struct fec_enet_private *fep = netdev_priv(ndev);
  8559. - const struct platform_device_id *id_entry =
  8560. - platform_get_device_id(fep->pdev);
  8561. struct phy_device *phy_dev = NULL;
  8562. char mdio_bus_id[MII_BUS_ID_SIZE];
  8563. char phy_name[MII_BUS_ID_SIZE + 3];
  8564. @@ -1297,10 +1559,11 @@
  8565. }
  8566. /* mask with MAC supported features */
  8567. - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
  8568. + if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
  8569. phy_dev->supported &= PHY_GBIT_FEATURES;
  8570. + phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
  8571. #if !defined(CONFIG_M5272)
  8572. - phy_dev->supported |= SUPPORTED_Pause;
  8573. + phy_dev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  8574. #endif
  8575. }
  8576. else
  8577. @@ -1312,6 +1575,8 @@
  8578. fep->link = 0;
  8579. fep->full_duplex = 0;
  8580. + fec_enet_phy_config(ndev);
  8581. +
  8582. netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
  8583. fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
  8584. fep->phy_dev->irq);
  8585. @@ -1324,8 +1589,6 @@
  8586. static struct mii_bus *fec0_mii_bus;
  8587. struct net_device *ndev = platform_get_drvdata(pdev);
  8588. struct fec_enet_private *fep = netdev_priv(ndev);
  8589. - const struct platform_device_id *id_entry =
  8590. - platform_get_device_id(fep->pdev);
  8591. int err = -ENXIO, i;
  8592. /*
  8593. @@ -1344,7 +1607,7 @@
  8594. * mdio interface in board design, and need to be configured by
  8595. * fec0 mii_bus.
  8596. */
  8597. - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
  8598. + if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
  8599. /* fec1 uses fec0 mii_bus */
  8600. if (mii_cnt && fec0_mii_bus) {
  8601. fep->mii_bus = fec0_mii_bus;
  8602. @@ -1365,7 +1628,7 @@
  8603. * document.
  8604. */
  8605. fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
  8606. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
  8607. + if (fep->quirks & FEC_QUIRK_ENET_MAC)
  8608. fep->phy_speed--;
  8609. fep->phy_speed <<= 1;
  8610. writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
  8611. @@ -1399,7 +1662,7 @@
  8612. mii_cnt++;
  8613. /* save fec0 mii_bus */
  8614. - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
  8615. + if (fep->quirks & FEC_QUIRK_ENET_MAC)
  8616. fec0_mii_bus = fep->mii_bus;
  8617. return 0;
  8618. @@ -1461,7 +1724,7 @@
  8619. {
  8620. struct fec_enet_private *fep = netdev_priv(ndev);
  8621. - if (fep->bufdesc_ex) {
  8622. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  8623. info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
  8624. SOF_TIMESTAMPING_RX_SOFTWARE |
  8625. @@ -1485,6 +1748,51 @@
  8626. }
  8627. }
  8628. +static void fec_enet_get_ringparam(struct net_device *ndev,
  8629. + struct ethtool_ringparam *ring)
  8630. +{
  8631. + struct fec_enet_private *fep = netdev_priv(ndev);
  8632. +
  8633. + ring->rx_max_pending = RX_RING_SIZE;
  8634. + ring->tx_max_pending = TX_RING_SIZE;
  8635. + ring->rx_pending = fep->rx_ring_size;
  8636. + ring->tx_pending = fep->tx_ring_size;
  8637. +}
  8638. +
  8639. +static int fec_enet_set_ringparam(struct net_device *ndev,
  8640. + struct ethtool_ringparam *ring)
  8641. +{
  8642. + struct fec_enet_private *fep = netdev_priv(ndev);
  8643. + unsigned rx, tx, tx_min;
  8644. +
  8645. + tx_min = ndev->features & NETIF_F_SG ? TX_RING_SIZE_MIN_SG : 16;
  8646. +
  8647. + rx = clamp_t(u32, ring->rx_pending, 16, RX_RING_SIZE);
  8648. + tx = clamp_t(u32, ring->tx_pending, tx_min, TX_RING_SIZE);
  8649. +
  8650. + if (tx == fep->tx_ring_size && rx == fep->rx_ring_size)
  8651. + return 0;
  8652. +
  8653. + /* Setting the ring size while the interface is down is easy */
  8654. + if (!netif_running(ndev)) {
  8655. + fep->tx_ring_size = tx;
  8656. + fep->rx_ring_size = rx;
  8657. + } else {
8658. + return -EINVAL; /* resizing a running interface is not supported yet; the path below is currently unreachable */
  8659. +
  8660. + napi_disable(&fep->napi);
  8661. + netif_tx_lock_bh(ndev);
  8662. + fec_stop(ndev);
  8663. + /* reallocate ring */
  8664. + fec_restart(ndev);
  8665. + netif_wake_queue(ndev);
  8666. + netif_tx_unlock_bh(ndev);
  8667. + napi_enable(&fep->napi);
  8668. + }
  8669. +
  8670. + return 0;
  8671. +}
  8672. +
  8673. #if !defined(CONFIG_M5272)
  8674. static void fec_enet_get_pauseparam(struct net_device *ndev,
  8675. @@ -1493,42 +1801,81 @@
  8676. struct fec_enet_private *fep = netdev_priv(ndev);
  8677. pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
  8678. - pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
  8679. - pause->rx_pause = pause->tx_pause;
  8680. + pause->rx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_RX) != 0;
  8681. + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_TX) != 0;
  8682. }
  8683. static int fec_enet_set_pauseparam(struct net_device *ndev,
  8684. struct ethtool_pauseparam *pause)
  8685. {
  8686. struct fec_enet_private *fep = netdev_priv(ndev);
  8687. + unsigned pause_flag, changed;
  8688. + struct phy_device *phy = fep->phy_dev;
  8689. - if (pause->tx_pause != pause->rx_pause) {
  8690. - netdev_info(ndev,
  8691. - "hardware only support enable/disable both tx and rx");
  8692. + if (!phy)
  8693. + return -ENODEV;
  8694. + if (!(phy->supported & SUPPORTED_Pause))
  8695. + return -EINVAL;
  8696. + if (!(phy->supported & SUPPORTED_Asym_Pause) &&
  8697. + pause->rx_pause != pause->tx_pause)
  8698. return -EINVAL;
  8699. - }
  8700. - fep->pause_flag = 0;
  8701. + pause_flag = 0;
  8702. + if (pause->autoneg)
  8703. + pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
  8704. + if (pause->rx_pause)
  8705. + pause_flag |= FEC_PAUSE_FLAG_RX;
  8706. + if (pause->tx_pause)
  8707. + pause_flag |= FEC_PAUSE_FLAG_TX;
  8708. +
  8709. + changed = fep->pause_flag ^ pause_flag;
  8710. + fep->pause_flag = pause_flag;
  8711. +
8712. + /* configure the phy advertisement according to our new options */
  8713. + fec_enet_phy_config(ndev);
  8714. +
  8715. + if (changed) {
  8716. + if (pause_flag & FEC_PAUSE_FLAG_AUTONEG) {
  8717. + if (netif_running(ndev))
  8718. + phy_start_aneg(fep->phy_dev);
  8719. + } else {
  8720. + int adv, old_adv;
  8721. - /* tx pause must be same as rx pause */
  8722. - fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
  8723. - fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
  8724. -
  8725. - if (pause->rx_pause || pause->autoneg) {
  8726. - fep->phy_dev->supported |= ADVERTISED_Pause;
  8727. - fep->phy_dev->advertising |= ADVERTISED_Pause;
  8728. - } else {
  8729. - fep->phy_dev->supported &= ~ADVERTISED_Pause;
  8730. - fep->phy_dev->advertising &= ~ADVERTISED_Pause;
  8731. - }
  8732. + /*
  8733. + * Even if we are not in autonegotiate mode, we
  8734. + * still update the phy with our capabilities so
8735. + * our link partner can make the appropriate
  8736. + * decision. PHYLIB provides no way to do this.
  8737. + */
  8738. + adv = phy_read(phy, MII_ADVERTISE);
  8739. + if (adv >= 0) {
  8740. + old_adv = adv;
  8741. + adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
  8742. + if (phy->advertising & ADVERTISED_Pause)
  8743. + adv |= ADVERTISE_PAUSE_CAP;
  8744. + if (phy->advertising & ADVERTISED_Asym_Pause)
  8745. + adv |= ADVERTISE_PAUSE_ASYM;
  8746. - if (pause->autoneg) {
  8747. - if (netif_running(ndev))
  8748. - fec_stop(ndev);
  8749. - phy_start_aneg(fep->phy_dev);
  8750. + if (old_adv != adv)
  8751. + phy_write(phy, MII_ADVERTISE, adv);
  8752. + }
  8753. +
  8754. + /* Forced pause mode */
  8755. + fep->pause_mode = fep->pause_flag;
  8756. +
  8757. + if (netif_running(ndev)) {
  8758. + mutex_lock(&fep->mutex);
  8759. + napi_disable(&fep->napi);
  8760. + netif_tx_lock_bh(ndev);
  8761. + fec_stop(ndev);
  8762. + fec_restart(ndev);
  8763. + netif_wake_queue(ndev);
  8764. + netif_tx_unlock_bh(ndev);
  8765. + napi_enable(&fep->napi);
  8766. + mutex_unlock(&fep->mutex);
  8767. + }
  8768. + }
  8769. }
  8770. - if (netif_running(ndev))
  8771. - fec_restart(ndev, 0);
  8772. return 0;
  8773. }
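/*
 * Editorial sketch (not part of the patch): the rx/tx pause settings above
 * resolve to the 802.3 advertisement bits as below. The helper name
 * fec_pause_to_adv() is hypothetical; the mapping mirrors
 * mii_advertise_flowctrl() from <linux/mii.h>.
 *
 *	static u16 fec_pause_to_adv(bool rx, bool tx)
 *	{
 *		u16 adv = 0;
 *
 *		if (rx)		/* rx only: CAP | ASYM */
 *			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 *		if (tx)		/* tx only: ASYM; rx and tx: CAP */
 *			adv ^= ADVERTISE_PAUSE_ASYM;
 *		return adv;
 *	}
 */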
  8774. @@ -1645,21 +1992,21 @@
  8775. }
  8776. static const struct ethtool_ops fec_enet_ethtool_ops = {
  8777. -#if !defined(CONFIG_M5272)
  8778. - .get_pauseparam = fec_enet_get_pauseparam,
  8779. - .set_pauseparam = fec_enet_set_pauseparam,
  8780. -#endif
  8781. .get_settings = fec_enet_get_settings,
  8782. .set_settings = fec_enet_set_settings,
  8783. .get_drvinfo = fec_enet_get_drvinfo,
  8784. - .get_link = ethtool_op_get_link,
  8785. - .get_ts_info = fec_enet_get_ts_info,
  8786. .nway_reset = fec_enet_nway_reset,
  8787. + .get_link = ethtool_op_get_link,
  8788. + .get_ringparam = fec_enet_get_ringparam,
  8789. + .set_ringparam = fec_enet_set_ringparam,
  8790. #ifndef CONFIG_M5272
  8791. - .get_ethtool_stats = fec_enet_get_ethtool_stats,
  8792. + .get_pauseparam = fec_enet_get_pauseparam,
  8793. + .set_pauseparam = fec_enet_set_pauseparam,
  8794. .get_strings = fec_enet_get_strings,
  8795. + .get_ethtool_stats = fec_enet_get_ethtool_stats,
  8796. .get_sset_count = fec_enet_get_sset_count,
  8797. #endif
  8798. + .get_ts_info = fec_enet_get_ts_info,
  8799. };
  8800. static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
  8801. @@ -1673,7 +2020,7 @@
  8802. if (!phydev)
  8803. return -ENODEV;
  8804. - if (fep->bufdesc_ex) {
  8805. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  8806. if (cmd == SIOCSHWTSTAMP)
  8807. return fec_ptp_set(ndev, rq);
  8808. if (cmd == SIOCGHWTSTAMP)
  8809. @@ -1688,23 +2035,33 @@
  8810. struct fec_enet_private *fep = netdev_priv(ndev);
  8811. unsigned int i;
  8812. struct sk_buff *skb;
  8813. - struct bufdesc *bdp;
  8814. + union bufdesc_u *bdp;
  8815. - bdp = fep->rx_bd_base;
  8816. for (i = 0; i < fep->rx_ring_size; i++) {
  8817. - skb = fep->rx_skbuff[i];
  8818. + bdp = fec_enet_rx_get(i, fep);
  8819. - if (bdp->cbd_bufaddr)
  8820. - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
  8821. + skb = fep->rx_skbuff[i];
  8822. + fep->rx_skbuff[i] = NULL;
  8823. + if (skb) {
  8824. + dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr,
  8825. FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8826. - if (skb)
  8827. dev_kfree_skb(skb);
  8828. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8829. + }
  8830. }
  8831. - bdp = fep->tx_bd_base;
  8832. - for (i = 0; i < fep->tx_ring_size; i++)
  8833. + for (i = 0; i < fep->tx_ring_size; i++) {
  8834. + bdp = fec_enet_tx_get(i, fep);
  8835. + if (bdp->bd.cbd_bufaddr)
  8836. + fec_enet_tx_unmap(i, bdp, fep);
  8837. kfree(fep->tx_bounce[i]);
  8838. + fep->tx_bounce[i] = NULL;
  8839. + skb = fep->tx_skbuff[i];
  8840. + fep->tx_skbuff[i] = NULL;
  8841. + if (skb)
  8842. + dev_kfree_skb(skb);
  8843. + }
  8844. +
  8845. + dma_free_coherent(NULL, PAGE_SIZE, fep->rx_bd_base, fep->rx_bd_dma);
  8846. }
  8847. static int fec_enet_alloc_buffers(struct net_device *ndev)
  8848. @@ -1712,59 +2069,82 @@
  8849. struct fec_enet_private *fep = netdev_priv(ndev);
  8850. unsigned int i;
  8851. struct sk_buff *skb;
  8852. - struct bufdesc *bdp;
  8853. + union bufdesc_u *bdp;
  8854. + union bufdesc_u *rx_cbd_cpu, *tx_cbd_cpu;
  8855. + dma_addr_t rx_cbd_dma, tx_cbd_dma;
  8856. +
  8857. + /* Allocate memory for buffer descriptors. */
  8858. + rx_cbd_cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &rx_cbd_dma,
  8859. + GFP_KERNEL);
  8860. + tx_cbd_cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &tx_cbd_dma,
  8861. + GFP_KERNEL);
  8862. + if (!rx_cbd_cpu || !tx_cbd_cpu) {
  8863. + if (rx_cbd_cpu)
  8864. + dma_free_coherent(NULL, PAGE_SIZE, rx_cbd_cpu, rx_cbd_dma);
  8865. + if (tx_cbd_cpu)
  8866. + dma_free_coherent(NULL, PAGE_SIZE, tx_cbd_cpu, tx_cbd_dma);
  8867. + return -ENOMEM;
  8868. + }
  8869. +
  8870. + memset(rx_cbd_cpu, 0, PAGE_SIZE);
  8871. + memset(tx_cbd_cpu, 0, PAGE_SIZE);
  8872. +
  8873. + /* Set receive and transmit descriptor base. */
  8874. + fep->rx_bd_base = rx_cbd_cpu;
  8875. + fep->rx_bd_dma = rx_cbd_dma;
  8876. + fep->tx_bd_base = tx_cbd_cpu;
  8877. + fep->tx_bd_dma = tx_cbd_dma;
  8878. - bdp = fep->rx_bd_base;
  8879. for (i = 0; i < fep->rx_ring_size; i++) {
  8880. + dma_addr_t addr;
  8881. +
  8882. skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
  8883. - if (!skb) {
  8884. - fec_enet_free_buffers(ndev);
  8885. - return -ENOMEM;
  8886. - }
  8887. - fep->rx_skbuff[i] = skb;
  8888. + if (!skb)
  8889. + goto err_alloc;
  8890. - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
  8891. + addr = dma_map_single(&fep->pdev->dev, skb->data,
  8892. FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
  8893. - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
  8894. - fec_enet_free_buffers(ndev);
  8895. + if (dma_mapping_error(&fep->pdev->dev, addr)) {
  8896. + dev_kfree_skb(skb);
  8897. if (net_ratelimit())
  8898. netdev_err(ndev, "Rx DMA memory map failed\n");
  8899. - return -ENOMEM;
  8900. + goto err_alloc;
  8901. }
  8902. - bdp->cbd_sc = BD_ENET_RX_EMPTY;
  8903. - if (fep->bufdesc_ex) {
  8904. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  8905. - ebdp->cbd_esc = BD_ENET_RX_INT;
  8906. - }
  8907. + fep->rx_skbuff[i] = skb;
  8908. + bdp = fec_enet_rx_get(i, fep);
  8909. + bdp->bd.cbd_bufaddr = addr;
  8910. + bdp->bd.cbd_sc = BD_ENET_RX_EMPTY;
  8911. + /* Set the last buffer to wrap. */
  8912. + if (i == fep->rx_ring_size - 1)
  8913. + bdp->bd.cbd_sc |= BD_SC_WRAP;
  8914. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8915. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  8916. + bdp->ebd.cbd_esc = BD_ENET_RX_INT;
  8917. }
  8918. - /* Set the last buffer to wrap. */
  8919. - bdp = fec_enet_get_prevdesc(bdp, fep);
  8920. - bdp->cbd_sc |= BD_SC_WRAP;
  8921. -
  8922. - bdp = fep->tx_bd_base;
  8923. for (i = 0; i < fep->tx_ring_size; i++) {
  8924. + bdp = fec_enet_tx_get(i, fep);
  8925. fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
  8926. + if (!fep->tx_bounce[i])
  8927. + goto err_alloc;
  8928. - bdp->cbd_sc = 0;
  8929. - bdp->cbd_bufaddr = 0;
  8930. -
  8931. - if (fep->bufdesc_ex) {
  8932. - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  8933. - ebdp->cbd_esc = BD_ENET_TX_INT;
  8934. - }
  8935. + /* Set the last buffer to wrap. */
  8936. + if (i == fep->tx_ring_size - 1)
  8937. + bdp->bd.cbd_sc = BD_SC_WRAP;
  8938. + else
  8939. + bdp->bd.cbd_sc = 0;
  8940. + bdp->bd.cbd_bufaddr = 0;
  8941. - bdp = fec_enet_get_nextdesc(bdp, fep);
  8942. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  8943. + bdp->ebd.cbd_esc = BD_ENET_TX_INT;
  8944. }
  8945. - /* Set the last buffer to wrap. */
  8946. - bdp = fec_enet_get_prevdesc(bdp, fep);
  8947. - bdp->cbd_sc |= BD_SC_WRAP;
  8948. -
  8949. return 0;
  8950. +
  8951. + err_alloc:
  8952. + fec_enet_free_buffers(ndev);
  8953. + return -ENOMEM;
  8954. }
  8955. static int
  8956. @@ -1788,10 +2168,12 @@
  8957. return ret;
  8958. }
  8959. + mutex_lock(&fep->mutex);
  8960. + fec_restart(ndev);
  8961. + mutex_unlock(&fep->mutex);
  8962. napi_enable(&fep->napi);
  8963. phy_start(fep->phy_dev);
  8964. netif_start_queue(ndev);
  8965. - fep->opened = 1;
  8966. return 0;
  8967. }
  8968. @@ -1800,17 +2182,19 @@
  8969. {
  8970. struct fec_enet_private *fep = netdev_priv(ndev);
  8971. - /* Don't know what to do yet. */
  8972. - napi_disable(&fep->napi);
  8973. - fep->opened = 0;
  8974. - netif_stop_queue(ndev);
  8975. - fec_stop(ndev);
  8976. + phy_stop(fep->phy_dev);
  8977. - if (fep->phy_dev) {
  8978. - phy_stop(fep->phy_dev);
  8979. - phy_disconnect(fep->phy_dev);
  8980. + if (netif_device_present(ndev)) {
  8981. + napi_disable(&fep->napi);
  8982. + netif_tx_disable(ndev);
  8983. + mutex_lock(&fep->mutex);
  8984. + fec_stop(ndev);
  8985. + mutex_unlock(&fep->mutex);
  8986. }
  8987. + phy_disconnect(fep->phy_dev);
  8988. + fep->phy_dev = NULL;
  8989. +
  8990. fec_enet_free_buffers(ndev);
  8991. return 0;
  8992. @@ -1935,28 +2319,67 @@
  8993. }
  8994. #endif
  8995. +static netdev_features_t fec_fix_features(struct net_device *ndev,
  8996. + netdev_features_t features)
  8997. +{
  8998. + struct fec_enet_private *fep = netdev_priv(ndev);
  8999. +
  9000. + /*
  9001. + * NETIF_F_SG requires a minimum transmit ring size. If we
  9002. + * have less than this size, we can't support this feature.
  9003. + */
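+ /*
+ * Editorial note: a scatter-gather skb can consume up to MAX_SKB_FRAGS + 1
+ * descriptors, so the transmit ring must hold at least one maximally
+ * fragmented packet plus some headroom; TX_RING_SIZE_MIN_SG is assumed to
+ * encode that lower bound.
+ */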
  9004. + if (fep->tx_ring_size < TX_RING_SIZE_MIN_SG)
  9005. + features &= ~NETIF_F_SG;
  9006. +
  9007. + return features;
  9008. +}
  9009. +
  9010. +#define FEATURES_NEED_QUIESCE (NETIF_F_RXCSUM | NETIF_F_SG)
  9011. +
  9012. static int fec_set_features(struct net_device *netdev,
  9013. netdev_features_t features)
  9014. {
  9015. struct fec_enet_private *fep = netdev_priv(netdev);
  9016. netdev_features_t changed = features ^ netdev->features;
  9017. + /* Quiesce the device if necessary */
  9018. + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
  9019. + mutex_lock(&fep->mutex);
  9020. + napi_disable(&fep->napi);
  9021. + netif_tx_lock_bh(netdev);
  9022. + fec_stop(netdev);
  9023. + }
  9024. +
  9025. netdev->features = features;
  9026. /* Receive checksum has been changed */
  9027. if (changed & NETIF_F_RXCSUM) {
  9028. if (features & NETIF_F_RXCSUM)
  9029. - fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
  9030. + fep->flags |= FEC_FLAG_RX_CSUM;
  9031. else
  9032. - fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
  9033. + fep->flags &= ~FEC_FLAG_RX_CSUM;
  9034. + }
  9035. - if (netif_running(netdev)) {
  9036. - fec_stop(netdev);
  9037. - fec_restart(netdev, fep->phy_dev->duplex);
  9038. - netif_wake_queue(netdev);
  9039. - } else {
  9040. - fec_restart(netdev, fep->phy_dev->duplex);
  9041. - }
  9042. + if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
  9043. + if (features & NETIF_F_HW_VLAN_CTAG_RX)
  9044. + fep->flags |= FEC_FLAG_RX_VLAN;
  9045. + else
  9046. + fep->flags &= ~FEC_FLAG_RX_VLAN;
  9047. + }
  9048. +
  9049. + /* Set the appropriate minimum transmit ring free threshold */
  9050. + if (features & NETIF_F_SG)
  9051. + fep->tx_min = MAX_SKB_FRAGS + 1;
  9052. + else
  9053. + fep->tx_min = 1;
  9054. +
  9055. + /* Resume the device after updates */
  9056. + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
  9057. + fec_restart(netdev);
  9058. + netif_wake_queue(netdev);
  9059. + netif_tx_unlock_bh(netdev);
  9060. + napi_enable(&fep->napi);
  9061. + mutex_unlock(&fep->mutex);
  9062. }
  9063. return 0;
  9064. @@ -1975,27 +2398,13 @@
  9065. #ifdef CONFIG_NET_POLL_CONTROLLER
  9066. .ndo_poll_controller = fec_poll_controller,
  9067. #endif
  9068. + .ndo_fix_features = fec_fix_features,
  9069. .ndo_set_features = fec_set_features,
  9070. };
  9071. - /*
  9072. - * XXX: We need to clean up on failure exits here.
  9073. - *
  9074. - */
  9075. -static int fec_enet_init(struct net_device *ndev)
  9076. +static void fec_enet_init(struct net_device *ndev)
  9077. {
  9078. struct fec_enet_private *fep = netdev_priv(ndev);
  9079. - const struct platform_device_id *id_entry =
  9080. - platform_get_device_id(fep->pdev);
  9081. - struct bufdesc *cbd_base;
  9082. -
  9083. - /* Allocate memory for buffer descriptors. */
  9084. - cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
  9085. - GFP_KERNEL);
  9086. - if (!cbd_base)
  9087. - return -ENOMEM;
  9088. -
  9089. - memset(cbd_base, 0, PAGE_SIZE);
  9090. fep->netdev = ndev;
  9091. @@ -2008,13 +2417,8 @@
  9092. fep->tx_ring_size = TX_RING_SIZE;
  9093. fep->rx_ring_size = RX_RING_SIZE;
  9094. - /* Set receive and transmit descriptor base. */
  9095. - fep->rx_bd_base = cbd_base;
  9096. - if (fep->bufdesc_ex)
  9097. - fep->tx_bd_base = (struct bufdesc *)
  9098. - (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
  9099. - else
  9100. - fep->tx_bd_base = cbd_base + fep->rx_ring_size;
  9101. + fep->rx_bd_base = fep->tx_bd_base = NULL;
  9102. + fep->rx_bd_dma = fep->tx_bd_dma = 0;
  9103. /* The FEC Ethernet specific entries in the device structure */
  9104. ndev->watchdog_timeo = TX_TIMEOUT;
  9105. @@ -2024,24 +2428,37 @@
  9106. writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
  9107. netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
  9108. - if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
  9109. - /* enable hw VLAN support */
  9110. - ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
  9111. - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
  9112. - }
  9113. -
  9114. - if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
  9115. - /* enable hw accelerator */
  9116. - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9117. - | NETIF_F_RXCSUM);
  9118. - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9119. - | NETIF_F_RXCSUM);
  9120. - fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
  9121. + if (fep->flags & FEC_FLAG_BUFDESC_EX) {
  9122. + /* Features which require the enhanced buffer descriptors */
  9123. + if (fep->quirks & FEC_QUIRK_HAS_VLAN) {
  9124. + /* enable hw VLAN support */
  9125. + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
  9126. + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
  9127. + fep->flags |= FEC_FLAG_RX_VLAN;
  9128. + }
  9129. +
  9130. + if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
  9131. + /* enable hw accelerator */
  9132. + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9133. + | NETIF_F_RXCSUM);
  9134. + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
  9135. + | NETIF_F_RXCSUM);
  9136. + fep->flags |= FEC_FLAG_RX_CSUM;
  9137. + }
  9138. }
  9139. - fec_restart(ndev, 0);
  9140. + if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) {
  9141. + /* don't enable SG if we need to swap frames */
  9142. + ndev->features |= NETIF_F_SG;
  9143. + ndev->hw_features |= NETIF_F_SG;
  9144. + }
  9145. - return 0;
  9146. + if (ndev->features & NETIF_F_SG)
  9147. + fep->tx_min = MAX_SKB_FRAGS + 1;
  9148. + else
  9149. + fep->tx_min = 1;
  9150. +
  9151. + fec_restart(ndev);
  9152. }
  9153. #ifdef CONFIG_OF
  9154. @@ -2107,11 +2524,16 @@
  9155. /* setup board info structure */
  9156. fep = netdev_priv(ndev);
  9157. + mutex_init(&fep->mutex);
  9158. +
  9159. + if (pdev->id_entry)
  9160. + fep->quirks = pdev->id_entry->driver_data;
  9161. #if !defined(CONFIG_M5272)
  9162. /* default enable pause frame auto negotiation */
  9163. - if (pdev->id_entry &&
  9164. - (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
  9165. - fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
  9166. + if (fep->quirks & FEC_QUIRK_HAS_GBIT)
  9167. + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG |
  9168. + FEC_PAUSE_FLAG_TX |
  9169. + FEC_PAUSE_FLAG_RX;
  9170. #endif
  9171. r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  9172. @@ -2124,7 +2546,9 @@
  9173. fep->pdev = pdev;
  9174. fep->dev_id = dev_id++;
  9175. - fep->bufdesc_ex = 0;
  9176. + fep->flags = 0;
  9177. + if (pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX)
  9178. + fep->flags |= FEC_FLAG_BUFDESC_EX;
  9179. platform_set_drvdata(pdev, ndev);
  9180. @@ -2157,11 +2581,9 @@
  9181. fep->clk_enet_out = NULL;
  9182. fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
  9183. - fep->bufdesc_ex =
  9184. - pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
  9185. if (IS_ERR(fep->clk_ptp)) {
  9186. fep->clk_ptp = NULL;
  9187. - fep->bufdesc_ex = 0;
  9188. + fep->flags &= ~FEC_FLAG_BUFDESC_EX;
  9189. }
  9190. ret = clk_prepare_enable(fep->clk_ahb);
  9191. @@ -2198,12 +2620,10 @@
  9192. fec_reset_phy(pdev);
  9193. - if (fep->bufdesc_ex)
  9194. + if (fep->flags & FEC_FLAG_BUFDESC_EX)
  9195. fec_ptp_init(pdev);
  9196. - ret = fec_enet_init(ndev);
  9197. - if (ret)
  9198. - goto failed_init;
  9199. + fec_enet_init(ndev);
  9200. for (i = 0; i < FEC_IRQ_NUM; i++) {
  9201. irq = platform_get_irq(pdev, i);
  9202. @@ -2230,17 +2650,16 @@
  9203. if (ret)
  9204. goto failed_register;
  9205. - if (fep->bufdesc_ex && fep->ptp_clock)
  9206. + if (fep->flags & FEC_FLAG_BUFDESC_EX && fep->ptp_clock)
  9207. netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
  9208. - INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
  9209. + INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
  9210. return 0;
  9211. failed_register:
  9212. fec_enet_mii_remove(fep);
  9213. failed_mii_init:
  9214. failed_irq:
  9215. -failed_init:
  9216. if (fep->reg_phy)
  9217. regulator_disable(fep->reg_phy);
  9218. failed_regulator:
  9219. @@ -2266,7 +2685,7 @@
  9220. struct net_device *ndev = platform_get_drvdata(pdev);
  9221. struct fec_enet_private *fep = netdev_priv(ndev);
  9222. - cancel_delayed_work_sync(&(fep->delay_work.delay_work));
  9223. + cancel_work_sync(&fep->tx_timeout_work);
  9224. unregister_netdev(ndev);
  9225. fec_enet_mii_remove(fep);
  9226. del_timer_sync(&fep->time_keep);
  9227. @@ -2292,10 +2711,19 @@
  9228. struct net_device *ndev = dev_get_drvdata(dev);
  9229. struct fec_enet_private *fep = netdev_priv(ndev);
  9230. + rtnl_lock();
  9231. if (netif_running(ndev)) {
  9232. - fec_stop(ndev);
  9233. + phy_stop(fep->phy_dev);
  9234. + napi_disable(&fep->napi);
  9235. + netif_tx_lock_bh(ndev);
  9236. netif_device_detach(ndev);
  9237. + netif_tx_unlock_bh(ndev);
  9238. + mutex_lock(&fep->mutex);
  9239. + fec_stop(ndev);
  9240. + mutex_unlock(&fep->mutex);
  9241. }
  9242. + rtnl_unlock();
  9243. +
  9244. if (fep->clk_ptp)
  9245. clk_disable_unprepare(fep->clk_ptp);
  9246. if (fep->clk_enet_out)
  9247. @@ -2342,10 +2770,18 @@
  9248. goto failed_clk_ptp;
  9249. }
  9250. + rtnl_lock();
  9251. if (netif_running(ndev)) {
  9252. - fec_restart(ndev, fep->full_duplex);
  9253. + mutex_lock(&fep->mutex);
  9254. + fec_restart(ndev);
  9255. + mutex_unlock(&fep->mutex);
  9256. + netif_tx_lock_bh(ndev);
  9257. netif_device_attach(ndev);
  9258. + netif_tx_unlock_bh(ndev);
  9259. + napi_enable(&fep->napi);
  9260. + phy_start(fep->phy_dev);
  9261. }
  9262. + rtnl_unlock();
  9263. return 0;
  9264. diff -Nur linux-3.15-rc6.orig/drivers/regulator/anatop-regulator.c linux-3.15-rc6/drivers/regulator/anatop-regulator.c
  9265. --- linux-3.15-rc6.orig/drivers/regulator/anatop-regulator.c 2014-05-21 23:42:02.000000000 +0200
  9266. +++ linux-3.15-rc6/drivers/regulator/anatop-regulator.c 2014-05-23 11:26:48.312940058 +0200
  9267. @@ -267,6 +267,7 @@
  9268. config.driver_data = sreg;
  9269. config.of_node = pdev->dev.of_node;
  9270. config.regmap = sreg->anatop;
  9271. + config.ena_gpio = -EINVAL;
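+ /* 0 is a valid GPIO, so flag "no enable GPIO" with an invalid number */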
  9272. /* Only core regulators have the ramp up delay configuration. */
  9273. if (sreg->control_reg && sreg->delay_bit_width) {
  9274. diff -Nur linux-3.15-rc6.orig/drivers/regulator/core.c linux-3.15-rc6/drivers/regulator/core.c
  9275. --- linux-3.15-rc6.orig/drivers/regulator/core.c 2014-05-21 23:42:02.000000000 +0200
  9276. +++ linux-3.15-rc6/drivers/regulator/core.c 2014-05-23 11:26:48.312940058 +0200
  9277. @@ -3459,7 +3459,7 @@
  9278. dev_set_drvdata(&rdev->dev, rdev);
  9279. - if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
  9280. + if (gpio_is_valid(config->ena_gpio)) {
  9281. ret = regulator_ena_gpio_request(rdev, config);
  9282. if (ret != 0) {
  9283. rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
  9284. diff -Nur linux-3.15-rc6.orig/drivers/regulator/dummy.c linux-3.15-rc6/drivers/regulator/dummy.c
  9285. --- linux-3.15-rc6.orig/drivers/regulator/dummy.c 2014-05-21 23:42:02.000000000 +0200
  9286. +++ linux-3.15-rc6/drivers/regulator/dummy.c 2014-05-23 11:26:48.312940058 +0200
  9287. @@ -48,6 +48,7 @@
  9288. config.dev = &pdev->dev;
  9289. config.init_data = &dummy_initdata;
  9290. + config.ena_gpio = -EINVAL;
  9291. dummy_regulator_rdev = regulator_register(&dummy_desc, &config);
  9292. if (IS_ERR(dummy_regulator_rdev)) {
  9293. diff -Nur linux-3.15-rc6.orig/drivers/regulator/fixed.c linux-3.15-rc6/drivers/regulator/fixed.c
  9294. --- linux-3.15-rc6.orig/drivers/regulator/fixed.c 2014-05-21 23:42:02.000000000 +0200
  9295. +++ linux-3.15-rc6/drivers/regulator/fixed.c 2014-05-23 11:26:48.312940058 +0200
  9296. @@ -161,9 +161,7 @@
  9297. drvdata->desc.n_voltages = 1;
  9298. drvdata->desc.fixed_uV = config->microvolts;
  9299. -
  9300. - if (config->gpio >= 0)
  9301. - cfg.ena_gpio = config->gpio;
  9302. + cfg.ena_gpio = config->gpio;
  9303. cfg.ena_gpio_invert = !config->enable_high;
  9304. if (config->enabled_at_boot) {
  9305. if (config->enable_high)
  9306. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/drm-ddc-connector.c linux-3.15-rc6/drivers/staging/imx-drm/drm-ddc-connector.c
  9307. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/drm-ddc-connector.c 1970-01-01 01:00:00.000000000 +0100
  9308. +++ linux-3.15-rc6/drivers/staging/imx-drm/drm-ddc-connector.c 2014-05-23 11:26:48.312940058 +0200
  9309. @@ -0,0 +1,92 @@
  9310. +#include <linux/i2c.h>
  9311. +#include <linux/module.h>
  9312. +#include <drm/drmP.h>
  9313. +#include <drm/drm_crtc_helper.h>
  9314. +#include <drm/drm_edid.h>
  9315. +
  9316. +#include "drm-ddc-connector.h"
  9317. +
  9318. +static enum drm_connector_status
  9319. +drm_ddc_connector_detect(struct drm_connector *connector, bool force)
  9320. +{
  9321. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9322. +
  9323. + return ddc_conn->detect ? ddc_conn->detect(connector, force) :
  9324. + connector_status_connected;
  9325. +}
  9326. +
  9327. +int drm_ddc_connector_get_modes(struct drm_connector *connector)
  9328. +{
  9329. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9330. + struct edid *edid;
  9331. + int ret = 0;
  9332. +
  9333. + if (!ddc_conn->ddc)
  9334. + return 0;
  9335. +
  9336. + edid = drm_get_edid(connector, ddc_conn->ddc);
  9337. + if (edid) {
  9338. + drm_mode_connector_update_edid_property(connector, edid);
  9339. + ret = drm_add_edid_modes(connector, edid);
  9340. + /* Store the ELD */
  9341. + drm_edid_to_eld(connector, edid);
  9342. + kfree(edid);
  9343. + }
  9344. +
  9345. + return ret;
  9346. +}
  9347. +EXPORT_SYMBOL_GPL(drm_ddc_connector_get_modes);
  9348. +
  9349. +static void drm_ddc_connector_destroy(struct drm_connector *connector)
  9350. +{
  9351. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9352. +
  9353. + drm_sysfs_connector_remove(connector);
  9354. + drm_connector_cleanup(connector);
  9355. + if (ddc_conn->ddc)
  9356. + i2c_put_adapter(ddc_conn->ddc);
  9357. +}
  9358. +
  9359. +static const struct drm_connector_funcs drm_ddc_connector_funcs = {
  9360. + .dpms = drm_helper_connector_dpms,
  9361. + .fill_modes = drm_helper_probe_single_connector_modes,
  9362. + .detect = drm_ddc_connector_detect,
  9363. + .destroy = drm_ddc_connector_destroy,
  9364. +};
  9365. +
  9366. +int drm_ddc_connector_add(struct drm_device *drm,
  9367. + struct drm_ddc_connector *ddc_conn, int connector_type)
  9368. +{
  9369. + drm_connector_init(drm, &ddc_conn->connector, &drm_ddc_connector_funcs,
  9370. + connector_type);
  9371. + return 0;
  9372. +}
  9373. +EXPORT_SYMBOL_GPL(drm_ddc_connector_add);
  9374. +
  9375. +struct drm_ddc_connector *drm_ddc_connector_create(struct drm_device *drm,
  9376. + struct device_node *np, void *private)
  9377. +{
  9378. + struct drm_ddc_connector *ddc_conn;
  9379. + struct device_node *ddc_node;
  9380. +
  9381. + ddc_conn = devm_kzalloc(drm->dev, sizeof(*ddc_conn), GFP_KERNEL);
  9382. + if (!ddc_conn)
  9383. + return ERR_PTR(-ENOMEM);
  9384. +
  9385. + ddc_conn->private = private;
  9386. +
  9387. + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
  9388. + if (ddc_node) {
  9389. + ddc_conn->ddc = of_find_i2c_adapter_by_node(ddc_node);
  9390. + of_node_put(ddc_node);
  9391. + if (!ddc_conn->ddc)
  9392. + return ERR_PTR(-EPROBE_DEFER);
  9393. + }
  9394. +
  9395. + return ddc_conn;
  9396. +}
  9397. +EXPORT_SYMBOL_GPL(drm_ddc_connector_create);
  9398. +
  9399. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  9400. +MODULE_DESCRIPTION("Generic DRM DDC connector module");
  9401. +MODULE_LICENSE("GPL v2");
  9402. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/drm-ddc-connector.h linux-3.15-rc6/drivers/staging/imx-drm/drm-ddc-connector.h
  9403. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/drm-ddc-connector.h 1970-01-01 01:00:00.000000000 +0100
  9404. +++ linux-3.15-rc6/drivers/staging/imx-drm/drm-ddc-connector.h 2014-05-23 11:26:48.312940058 +0200
  9405. @@ -0,0 +1,26 @@
  9406. +#ifndef DRM_DDC_CONNECTOR_H
  9407. +#define DRM_DDC_CONNECTOR_H
  9408. +
  9409. +struct drm_ddc_connector {
  9410. + struct i2c_adapter *ddc;
  9411. + struct drm_connector connector;
  9412. + enum drm_connector_status (*detect)(struct drm_connector *, bool);
  9413. + void *private;
  9414. +};
  9415. +
  9416. +#define to_ddc_conn(c) container_of(c, struct drm_ddc_connector, connector)
  9417. +
  9418. +int drm_ddc_connector_get_modes(struct drm_connector *connector);
  9419. +int drm_ddc_connector_add(struct drm_device *drm,
  9420. + struct drm_ddc_connector *ddc_conn, int connector_type);
  9421. +struct drm_ddc_connector *drm_ddc_connector_create(struct drm_device *drm,
  9422. + struct device_node *np, void *private);
  9423. +
  9424. +static inline void *drm_ddc_private(struct drm_connector *connector)
  9425. +{
  9426. + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector);
  9427. +
  9428. + return ddc_conn->private;
  9429. +}
  9430. +
  9431. +#endif
  9432. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-audio.c linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-audio.c
  9433. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-audio.c 1970-01-01 01:00:00.000000000 +0100
  9434. +++ linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-audio.c 2014-05-23 11:26:48.316940071 +0200
  9435. @@ -0,0 +1,654 @@
  9436. +/*
  9437. + * DesignWare HDMI audio driver
  9438. + *
  9439. + * This program is free software; you can redistribute it and/or modify
  9440. + * it under the terms of the GNU General Public License version 2 as
  9441. + * published by the Free Software Foundation.
  9442. + *
9443. + * Written and tested against the (alleged) DW HDMI Tx found in the i.MX6S.
  9444. + */
  9445. +#include <linux/delay.h>
  9446. +#include <linux/io.h>
  9447. +#include <linux/interrupt.h>
  9448. +#include <linux/module.h>
  9449. +#include <linux/platform_device.h>
  9450. +
  9451. +#include <sound/asoundef.h>
  9452. +#include <sound/core.h>
  9453. +#include <sound/initval.h>
  9454. +#include <sound/pcm.h>
  9455. +
  9456. +#include "dw-hdmi-audio.h"
  9457. +
  9458. +#define DRIVER_NAME "dw-hdmi-audio"
  9459. +
  9460. +/* Provide some bits rather than bit offsets */
  9461. +enum {
  9462. + HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7),
  9463. + HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3),
  9464. + HDMI_AHB_DMA_START_START = BIT(0),
  9465. + HDMI_AHB_DMA_STOP_STOP = BIT(0),
  9466. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5),
  9467. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4),
  9468. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3),
  9469. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2),
  9470. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
  9471. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
  9472. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL =
  9473. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR |
  9474. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST |
  9475. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY |
  9476. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE |
  9477. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL |
  9478. + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY,
  9479. + HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5),
  9480. + HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4),
  9481. + HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3),
  9482. + HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2),
  9483. + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
  9484. + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
  9485. + HDMI_IH_AHBDMAAUD_STAT0_ALL =
  9486. + HDMI_IH_AHBDMAAUD_STAT0_ERROR |
  9487. + HDMI_IH_AHBDMAAUD_STAT0_LOST |
  9488. + HDMI_IH_AHBDMAAUD_STAT0_RETRY |
  9489. + HDMI_IH_AHBDMAAUD_STAT0_DONE |
  9490. + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL |
  9491. + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY,
  9492. + HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1,
  9493. + HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1,
  9494. + HDMI_AHB_DMA_CONF0_INCR4 = 0,
  9495. + HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0),
  9496. + HDMI_AHB_DMA_MASK_DONE = BIT(7),
  9497. + HDMI_REVISION_ID = 0x0001,
  9498. + HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
  9499. + HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
  9500. + HDMI_AUD_N1 = 0x3200,
  9501. + HDMI_AUD_CTS1 = 0x3203,
  9502. + HDMI_AHB_DMA_CONF0 = 0x3600,
  9503. + HDMI_AHB_DMA_START = 0x3601,
  9504. + HDMI_AHB_DMA_STOP = 0x3602,
  9505. + HDMI_AHB_DMA_THRSLD = 0x3603,
  9506. + HDMI_AHB_DMA_STRADDR0 = 0x3604,
  9507. + HDMI_AHB_DMA_STPADDR0 = 0x3608,
  9508. + HDMI_AHB_DMA_STAT = 0x3612,
  9509. + HDMI_AHB_DMA_STAT_FULL = BIT(1),
  9510. + HDMI_AHB_DMA_MASK = 0x3614,
  9511. + HDMI_AHB_DMA_POL = 0x3615,
  9512. + HDMI_AHB_DMA_CONF1 = 0x3616,
  9513. + HDMI_AHB_DMA_BUFFPOL = 0x361a,
  9514. +};
  9515. +
  9516. +struct snd_dw_hdmi {
  9517. + struct snd_card *card;
  9518. + struct snd_pcm *pcm;
  9519. + struct dw_hdmi_audio_data data;
  9520. + struct snd_pcm_substream *substream;
  9521. + void (*reformat)(struct snd_dw_hdmi *, size_t, size_t);
  9522. + void *buf_src;
  9523. + void *buf_dst;
  9524. + dma_addr_t buf_addr;
  9525. + unsigned buf_offset;
  9526. + unsigned buf_period;
  9527. + unsigned buf_size;
  9528. + unsigned channels;
  9529. + uint8_t revision;
  9530. + uint8_t iec_offset;
  9531. + uint8_t cs[192][8];
  9532. +};
  9533. +
  9534. +static void dw_hdmi_writel(unsigned long val, void __iomem *ptr)
  9535. +{
  9536. + writeb_relaxed(val, ptr);
  9537. + writeb_relaxed(val >> 8, ptr + 1);
  9538. + writeb_relaxed(val >> 16, ptr + 2);
  9539. + writeb_relaxed(val >> 24, ptr + 3);
  9540. +}
  9541. +
  9542. +/*
  9543. + * Convert to hardware format: The userspace buffer contains IEC958 samples,
  9544. + * with the PCUV bits in bits 31..28 and audio samples in bits 27..4. We
  9545. + * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio
  9546. + * samples in 23..0.
  9547. + *
9548. + * Default preamble in bits 3..0: 8 = block start, 4 = even, 2 = odd
  9549. + *
  9550. + * Ideally, we could do with having the data properly formatted in userspace.
  9551. + */
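+/*
+ * Editorial example (hypothetical subframe value): with in = 0x12345678
+ * (PCUV = 0x1, 24-bit audio = 0x234567, preamble = 8 = block start), the
+ * loop below computes out = (in >> 4) | ((in & 8) << 25)
+ * = 0x01234567 | 0x10000000 = 0x11234567, i.e. the B flag in bit 28,
+ * PCUV in bits 27..24 and the audio sample in bits 23..0.
+ */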
  9552. +static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw,
  9553. + size_t offset, size_t bytes)
  9554. +{
  9555. + uint32_t *src = dw->buf_src + offset;
  9556. + uint32_t *dst = dw->buf_dst + offset;
  9557. + uint32_t *end = dw->buf_src + offset + bytes;
  9558. +
  9559. + do {
  9560. + uint32_t b, sample = *src++;
  9561. +
  9562. + b = (sample & 8) << (28 - 3);
  9563. +
  9564. + sample >>= 4;
  9565. +
  9566. + *dst++ = sample | b;
  9567. + } while (src < end);
  9568. +}
  9569. +
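+/*
+ * XOR-fold the argument down to a single even-parity bit and return it in
+ * bit 27 (the IEC958 P bit position). Callers mask off bits 31..27 first,
+ * so the parity covers the C, U, V and audio bits only.
+ */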
  9570. +static uint32_t parity(uint32_t sample)
  9571. +{
  9572. + sample ^= sample >> 16;
  9573. + sample ^= sample >> 8;
  9574. + sample ^= sample >> 4;
  9575. + sample ^= sample >> 2;
  9576. + sample ^= sample >> 1;
  9577. + return (sample & 1) << 27;
  9578. +}
  9579. +
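+/*
+ * S24 path: userspace supplies bare 24-bit samples in 32-bit words, so the
+ * IEC958 framing is synthesised here: the per-frame channel-status byte
+ * from dw->cs[] goes into bits 31..24 (C in bit 26, B in bit 28 for frame
+ * 0) and the parity bit is then computed into bit 27.
+ */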
  9580. +static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw,
  9581. + size_t offset, size_t bytes)
  9582. +{
  9583. + uint32_t *src = dw->buf_src + offset;
  9584. + uint32_t *dst = dw->buf_dst + offset;
  9585. + uint32_t *end = dw->buf_src + offset + bytes;
  9586. +
  9587. + do {
  9588. + unsigned i;
  9589. + uint8_t *cs;
  9590. +
  9591. + cs = dw->cs[dw->iec_offset++];
  9592. + if (dw->iec_offset >= 192)
  9593. + dw->iec_offset = 0;
  9594. +
  9595. + i = dw->channels;
  9596. + do {
  9597. + uint32_t sample = *src++;
  9598. +
  9599. + sample &= ~0xff000000;
  9600. + sample |= *cs++ << 24;
  9601. + sample |= parity(sample & ~0xf8000000);
  9602. +
  9603. + *dst++ = sample;
  9604. + } while (--i);
  9605. + } while (src < end);
  9606. +}
  9607. +
  9608. +static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw,
  9609. + struct snd_pcm_runtime *runtime)
  9610. +{
  9611. + uint8_t cs[4];
  9612. + unsigned ch, i, j;
  9613. +
  9614. + cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
  9615. + cs[1] = IEC958_AES1_CON_GENERAL;
  9616. + cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC;
  9617. + cs[3] = IEC958_AES3_CON_CLOCK_1000PPM;
  9618. +
  9619. + switch (runtime->rate) {
  9620. + case 32000:
  9621. + cs[3] |= IEC958_AES3_CON_FS_32000;
  9622. + break;
  9623. + case 44100:
  9624. + cs[3] |= IEC958_AES3_CON_FS_44100;
  9625. + break;
  9626. + case 48000:
  9627. + cs[3] |= IEC958_AES3_CON_FS_48000;
  9628. + break;
  9629. + case 88200:
  9630. + cs[3] |= IEC958_AES3_CON_FS_88200;
  9631. + break;
  9632. + case 96000:
  9633. + cs[3] |= IEC958_AES3_CON_FS_96000;
  9634. + break;
  9635. + case 176400:
  9636. + cs[3] |= IEC958_AES3_CON_FS_176400;
  9637. + break;
  9638. + case 192000:
  9639. + cs[3] |= IEC958_AES3_CON_FS_192000;
  9640. + break;
  9641. + }
  9642. +
  9643. + memset(dw->cs, 0, sizeof(dw->cs));
  9644. +
  9645. + for (ch = 0; ch < 8; ch++) {
  9646. + cs[2] &= ~IEC958_AES2_CON_CHANNEL;
  9647. + cs[2] |= (ch + 1) << 4;
  9648. +
  9649. + for (i = 0; i < ARRAY_SIZE(cs); i++) {
  9650. + unsigned c = cs[i];
  9651. +
  9652. + for (j = 0; j < 8; j++, c >>= 1)
  9653. + dw->cs[i * 8 + j][ch] = (c & 1) << 2;
  9654. + }
  9655. + }
  9656. + dw->cs[0][0] |= BIT(4);
  9657. +}
  9658. +
  9659. +static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw)
  9660. +{
  9661. + void __iomem *base = dw->data.base;
  9662. + unsigned offset = dw->buf_offset;
  9663. + unsigned period = dw->buf_period;
  9664. + u32 start, stop;
  9665. +
  9666. + dw->reformat(dw, offset, period);
  9667. +
  9668. + /* Clear all irqs before enabling irqs and starting DMA */
  9669. + writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL,
  9670. + base + HDMI_IH_AHBDMAAUD_STAT0);
  9671. +
  9672. + start = dw->buf_addr + offset;
  9673. + stop = start + period - 1;
  9674. +
  9675. + /* Setup the hardware start/stop addresses */
  9676. + dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0);
  9677. + dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0);
  9678. +
  9679. + writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK);
  9680. + writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START);
  9681. +
  9682. + offset += period;
  9683. + if (offset >= dw->buf_size)
  9684. + offset = 0;
  9685. + dw->buf_offset = offset;
  9686. +}
  9687. +
  9688. +static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw)
  9689. +{
  9690. + dw->substream = NULL;
  9691. +
  9692. + /* Disable interrupts before disabling DMA */
  9693. + writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK);
  9694. + writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP);
  9695. +}
  9696. +
  9697. +static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
  9698. +{
  9699. + struct snd_dw_hdmi *dw = data;
  9700. + struct snd_pcm_substream *substream;
  9701. + unsigned stat;
  9702. +
  9703. + stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
  9704. + if (!stat)
  9705. + return IRQ_NONE;
  9706. +
  9707. + writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
  9708. +
  9709. + substream = dw->substream;
  9710. + if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) {
  9711. + snd_pcm_period_elapsed(substream);
  9712. + if (dw->substream)
  9713. + dw_hdmi_start_dma(dw);
  9714. + }
  9715. +
  9716. + return IRQ_HANDLED;
  9717. +}
  9718. +
  9719. +static struct snd_pcm_hardware dw_hdmi_hw = {
  9720. + .info = SNDRV_PCM_INFO_INTERLEAVED |
  9721. + SNDRV_PCM_INFO_BLOCK_TRANSFER |
  9722. + SNDRV_PCM_INFO_MMAP |
  9723. + SNDRV_PCM_INFO_MMAP_VALID,
  9724. + .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE |
  9725. + SNDRV_PCM_FMTBIT_S24_LE,
  9726. + .rates = SNDRV_PCM_RATE_32000 |
  9727. + SNDRV_PCM_RATE_44100 |
  9728. + SNDRV_PCM_RATE_48000 |
  9729. + SNDRV_PCM_RATE_88200 |
  9730. + SNDRV_PCM_RATE_96000 |
  9731. + SNDRV_PCM_RATE_176400 |
  9732. + SNDRV_PCM_RATE_192000,
  9733. + .channels_min = 2,
  9734. + .channels_max = 8,
  9735. + .buffer_bytes_max = 64 * 1024,
  9736. + .period_bytes_min = 256,
  9737. + .period_bytes_max = 8192, /* ERR004323: must limit to 8k */
  9738. + .periods_min = 2,
  9739. + .periods_max = 16,
  9740. + .fifo_size = 0,
  9741. +};
  9742. +
  9743. +static unsigned rates_mask[] = {
  9744. + SNDRV_PCM_RATE_32000,
  9745. + SNDRV_PCM_RATE_44100,
  9746. + SNDRV_PCM_RATE_48000,
  9747. + SNDRV_PCM_RATE_88200,
  9748. + SNDRV_PCM_RATE_96000,
  9749. + SNDRV_PCM_RATE_176400,
  9750. + SNDRV_PCM_RATE_192000,
  9751. +};
  9752. +
  9753. +static void dw_hdmi_parse_eld(struct snd_dw_hdmi *dw,
  9754. + struct snd_pcm_runtime *runtime)
  9755. +{
  9756. + u8 *sad, *eld = dw->data.eld;
  9757. + unsigned eld_ver, mnl, sad_count, rates, rate_mask, i;
  9758. + unsigned max_channels;
  9759. +
  9760. + eld_ver = eld[0] >> 3;
  9761. + if (eld_ver != 2 && eld_ver != 31)
  9762. + return;
  9763. +
  9764. + mnl = eld[4] & 0x1f;
  9765. + if (mnl > 16)
  9766. + return;
  9767. +
  9768. + sad_count = eld[5] >> 4;
  9769. + sad = eld + 20 + mnl;
  9770. +
  9771. + /* Start from the basic audio settings */
  9772. + max_channels = 2;
  9773. + rates = 7;
  9774. + while (sad_count > 0) {
  9775. + switch (sad[0] & 0x78) {
  9776. + case 0x08: /* PCM */
  9777. + max_channels = max(max_channels, (sad[0] & 7) + 1u);
  9778. + rates |= sad[1];
  9779. + break;
  9780. + }
  9781. + sad += 3;
  9782. + sad_count -= 1;
  9783. + }
  9784. +
  9785. + for (rate_mask = i = 0; i < ARRAY_SIZE(rates_mask); i++)
  9786. + if (rates & 1 << i)
  9787. + rate_mask |= rates_mask[i];
  9788. +
  9789. + runtime->hw.rates &= rate_mask;
  9790. + runtime->hw.channels_max = min(runtime->hw.channels_max, max_channels);
  9791. +}
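+/*
+ * Editorial example (hypothetical SAD): the CEA-861 triple {0x09, 0x07, 0x07}
+ * decodes as format code 1 (LPCM, sad[0] & 0x78 == 0x08), (sad[0] & 7) + 1
+ * = 2 channels, and sad[1] = 0x07 advertising 32/44.1/48 kHz; on its own it
+ * would leave channels_max at 2 and keep only those three rates above.
+ */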
  9792. +
  9793. +static int dw_hdmi_open(struct snd_pcm_substream *substream)
  9794. +{
  9795. + struct snd_pcm_runtime *runtime = substream->runtime;
  9796. + struct snd_dw_hdmi *dw = substream->private_data;
  9797. + void __iomem *base = dw->data.base;
  9798. + int ret;
  9799. +
  9800. + /* Clear FIFO */
  9801. + writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST,
  9802. + base + HDMI_AHB_DMA_CONF0);
  9803. +
  9804. + /* Configure interrupt polarities */
  9805. + writeb_relaxed(~0, base + HDMI_AHB_DMA_POL);
  9806. + writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL);
  9807. +
  9808. + /* Keep interrupts masked, and clear any pending */
  9809. + writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK);
  9810. + writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0);
  9811. +
  9812. + ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED,
  9813. + "dw-hdmi-audio", dw);
  9814. + if (ret)
  9815. + return ret;
  9816. +
  9817. + /* Un-mute done interrupt */
  9818. + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL &
  9819. + ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE,
  9820. + base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
  9821. +
  9822. + runtime->hw = dw_hdmi_hw;
  9823. + dw_hdmi_parse_eld(dw, runtime);
  9824. + snd_pcm_limit_hw_rates(runtime);
  9825. +
  9826. + return 0;
  9827. +}
  9828. +
  9829. +static int dw_hdmi_close(struct snd_pcm_substream *substream)
  9830. +{
  9831. + struct snd_dw_hdmi *dw = substream->private_data;
  9832. +
  9833. + /* Mute all interrupts */
  9834. + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
  9835. + dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
  9836. +
  9837. + free_irq(dw->data.irq, dw);
  9838. +
  9839. + return 0;
  9840. +}
  9841. +
  9842. +static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
  9843. +{
  9844. + return snd_pcm_lib_free_vmalloc_buffer(substream);
  9845. +}
  9846. +
  9847. +static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
  9848. + struct snd_pcm_hw_params *params)
  9849. +{
  9850. + return snd_pcm_lib_alloc_vmalloc_buffer(substream,
  9851. + params_buffer_bytes(params));
  9852. +}
  9853. +
  9854. +static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
  9855. +{
  9856. + struct snd_pcm_runtime *runtime = substream->runtime;
  9857. + struct snd_dw_hdmi *dw = substream->private_data;
  9858. + uint8_t threshold, conf0, conf1;
  9859. +
  9860. + /* Setup as per 3.0.5 FSL 4.1.0 BSP */
  9861. + switch (dw->revision) {
  9862. + case 0x0a:
  9863. + conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
  9864. + HDMI_AHB_DMA_CONF0_INCR4;
  9865. + if (runtime->channels == 2)
  9866. + threshold = 126;
  9867. + else
  9868. + threshold = 124;
  9869. + break;
  9870. + case 0x1a:
  9871. + conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
  9872. + HDMI_AHB_DMA_CONF0_INCR8;
  9873. + threshold = 128;
  9874. + break;
  9875. + default:
  9876. + /* NOTREACHED */
  9877. + return -EINVAL;
  9878. + }
  9879. +
  9880. + dw->data.set_sample_rate(dw->data.hdmi, runtime->rate);
  9881. +
  9882. + /* Minimum number of bytes in the fifo. */
  9883. + runtime->hw.fifo_size = threshold * 32;
  9884. +
  9885. + conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK;
  9886. + conf1 = (1 << runtime->channels) - 1;
  9887. +
  9888. + writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
  9889. + writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
  9890. + writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
  9891. +
  9892. + switch (runtime->format) {
  9893. + case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
  9894. + dw->reformat = dw_hdmi_reformat_iec958;
  9895. + break;
  9896. + case SNDRV_PCM_FORMAT_S24_LE:
  9897. + dw_hdmi_create_cs(dw, runtime);
  9898. + dw->reformat = dw_hdmi_reformat_s24;
  9899. + break;
  9900. + }
  9901. + dw->iec_offset = 0;
  9902. + dw->channels = runtime->channels;
  9903. + dw->buf_src = runtime->dma_area;
  9904. + dw->buf_dst = substream->dma_buffer.area;
  9905. + dw->buf_addr = substream->dma_buffer.addr;
  9906. + dw->buf_period = snd_pcm_lib_period_bytes(substream);
  9907. + dw->buf_size = snd_pcm_lib_buffer_bytes(substream);
  9908. +
  9909. + return 0;
  9910. +}
  9911. +
  9912. +static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd)
  9913. +{
  9914. + struct snd_dw_hdmi *dw = substream->private_data;
  9915. + void __iomem *base = dw->data.base;
  9916. + unsigned n[3], cts[3];
  9917. + int ret = 0, i;
  9918. + bool err005174;
  9919. +
  9920. + switch (cmd) {
  9921. + case SNDRV_PCM_TRIGGER_START:
  9922. + err005174 = dw->revision == 0x0a;
  9923. + if (err005174) {
  9924. + for (i = 2; i >= 1; i--) {
  9925. + n[i] = readb_relaxed(base + HDMI_AUD_N1 + i);
  9926. + cts[i] = readb_relaxed(base + HDMI_AUD_CTS1 + i);
  9927. + writeb_relaxed(0, base + HDMI_AUD_N1 + i);
  9928. + writeb_relaxed(0, base + HDMI_AUD_CTS1 + i);
  9929. + }
  9930. + }
  9931. +
  9932. + dw->buf_offset = 0;
  9933. + dw->substream = substream;
  9934. + dw_hdmi_start_dma(dw);
  9935. +
  9936. + if (err005174) {
  9937. + for (i = 2; i >= 1; i--)
  9938. + writeb_relaxed(cts[i], base + HDMI_AUD_CTS1 + i);
  9939. + for (i = 2; i >= 1; i--)
  9940. + writeb_relaxed(n[i], base + HDMI_AUD_N1 + i);
  9941. + }
  9942. +
  9943. + substream->runtime->delay = substream->runtime->period_size;
  9944. + break;
  9945. +
  9946. + case SNDRV_PCM_TRIGGER_STOP:
  9947. + dw_hdmi_stop_dma(dw);
  9948. + break;
  9949. +
  9950. + default:
  9951. + ret = -EINVAL;
  9952. + break;
  9953. + }
  9954. +
  9955. + return ret;
  9956. +}
  9957. +
  9958. +static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
  9959. +{
  9960. + struct snd_pcm_runtime *runtime = substream->runtime;
  9961. + struct snd_dw_hdmi *dw = substream->private_data;
  9962. +
  9963. + return bytes_to_frames(runtime, dw->buf_offset);
  9964. +}
  9965. +
  9966. +static struct snd_pcm_ops snd_dw_hdmi_ops = {
  9967. + .open = dw_hdmi_open,
  9968. + .close = dw_hdmi_close,
  9969. + .ioctl = snd_pcm_lib_ioctl,
  9970. + .hw_params = dw_hdmi_hw_params,
  9971. + .hw_free = dw_hdmi_hw_free,
  9972. + .prepare = dw_hdmi_prepare,
  9973. + .trigger = dw_hdmi_trigger,
  9974. + .pointer = dw_hdmi_pointer,
  9975. + .page = snd_pcm_lib_get_vmalloc_page,
  9976. +};
  9977. +
  9978. +static int snd_dw_hdmi_probe(struct platform_device *pdev)
  9979. +{
  9980. + const struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
  9981. + struct device *dev = pdev->dev.parent;
  9982. + struct snd_dw_hdmi *dw;
  9983. + struct snd_card *card;
  9984. + struct snd_pcm *pcm;
  9985. + unsigned revision;
  9986. + int ret;
  9987. +
  9988. + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
  9989. + data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
  9990. + revision = readb_relaxed(data->base + HDMI_REVISION_ID);
  9991. + if (revision != 0x0a && revision != 0x1a) {
  9992. + dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n",
  9993. + revision);
  9994. + return -ENXIO;
  9995. + }
  9996. +
  9997. + ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
  9998. + THIS_MODULE, sizeof(struct snd_dw_hdmi), &card);
  9999. + if (ret < 0)
  10000. + return ret;
  10001. +
  10002. + snd_card_set_dev(card, dev);
  10003. +
  10004. + strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
  10005. + strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
  10006. + snprintf(card->longname, sizeof(card->longname),
  10007. + "%s rev 0x%02x, irq %d", card->shortname, revision,
  10008. + data->irq);
  10009. +
  10010. + dw = card->private_data;
  10011. + dw->card = card;
  10012. + dw->data = *data;
  10013. + dw->revision = revision;
  10014. +
  10015. + ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm);
  10016. + if (ret < 0)
  10017. + goto err;
  10018. +
  10019. + dw->pcm = pcm;
  10020. + pcm->private_data = dw;
  10021. + strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
  10022. + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);
  10023. +
  10024. + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
  10025. + dev, 64 * 1024, 64 * 1024);
  10026. +
  10027. + ret = snd_card_register(card);
  10028. + if (ret < 0)
  10029. + goto err;
  10030. +
  10031. + platform_set_drvdata(pdev, dw);
  10032. +
  10033. + return 0;
  10034. +
  10035. +err:
  10036. + snd_card_free(card);
  10037. + return ret;
  10038. +}
  10039. +
  10040. +static int snd_dw_hdmi_remove(struct platform_device *pdev)
  10041. +{
  10042. + struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
  10043. +
  10044. + snd_card_free(dw->card);
  10045. +
  10046. + return 0;
  10047. +}
  10048. +
  10049. +#ifdef CONFIG_PM_SLEEP
  10050. +static int snd_dw_hdmi_suspend(struct device *dev)
  10051. +{
  10052. + struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
  10053. +
  10054. + snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
  10055. + snd_pcm_suspend_all(dw->pcm);
  10056. +
  10057. + return 0;
  10058. +}
  10059. +
  10060. +static int snd_dw_hdmi_resume(struct device *dev)
  10061. +{
  10062. + struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
  10063. +
  10064. + snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0);
  10065. +
  10066. + return 0;
  10067. +}
  10068. +
  10069. +static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
  10070. + snd_dw_hdmi_resume);
  10071. +#define PM_OPS &snd_dw_hdmi_pm
  10072. +#else
  10073. +#define PM_OPS NULL
  10074. +#endif
  10075. +
  10076. +static struct platform_driver snd_dw_hdmi_driver = {
  10077. + .probe = snd_dw_hdmi_probe,
  10078. + .remove = snd_dw_hdmi_remove,
  10079. + .driver = {
  10080. + .name = "dw-hdmi-audio",
  10081. + .owner = THIS_MODULE,
  10082. + .pm = PM_OPS,
  10083. + },
  10084. +};
  10085. +
  10086. +module_platform_driver(snd_dw_hdmi_driver);
  10087. +
  10088. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  10089. +MODULE_LICENSE("GPL");
  10090. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-audio.h linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-audio.h
  10091. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-audio.h 1970-01-01 01:00:00.000000000 +0100
  10092. +++ linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-audio.h 2014-05-23 11:26:48.316940071 +0200
  10093. @@ -0,0 +1,15 @@
  10094. +#ifndef DW_HDMI_AUDIO_H
  10095. +#define DW_HDMI_AUDIO_H
  10096. +
  10097. +struct imx_hdmi;
  10098. +
  10099. +struct dw_hdmi_audio_data {
  10100. + phys_addr_t phys;
  10101. + void __iomem *base;
  10102. + int irq;
  10103. + struct imx_hdmi *hdmi;
  10104. + u8 *eld;
  10105. + void (*set_sample_rate)(struct imx_hdmi *, unsigned);
  10106. +};
  10107. +
  10108. +#endif
  10109. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-cec.c linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-cec.c
  10110. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-cec.c 1970-01-01 01:00:00.000000000 +0100
  10111. +++ linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-cec.c 2014-05-23 11:26:48.316940071 +0200
  10112. @@ -0,0 +1,205 @@
  10113. +/* http://git.freescale.com/git/cgit.cgi/imx/linux-2.6-imx.git/tree/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c?h=imx_3.0.35_4.1.0 */
  10114. +#include <linux/cec-dev.h>
  10115. +#include <linux/interrupt.h>
  10116. +#include <linux/io.h>
  10117. +#include <linux/module.h>
  10118. +#include <linux/platform_device.h>
  10119. +#include <linux/sched.h>
  10120. +#include <linux/slab.h>
  10121. +
  10122. +#include "imx-hdmi.h"
  10123. +#include "dw-hdmi-cec.h"
  10124. +
  10125. +#define DEV_NAME "mxc_hdmi_cec"
  10126. +
  10127. +enum {
  10128. + CEC_STAT_DONE = BIT(0),
  10129. + CEC_STAT_EOM = BIT(1),
  10130. + CEC_STAT_NACK = BIT(2),
  10131. + CEC_STAT_ARBLOST = BIT(3),
  10132. + CEC_STAT_ERROR_INIT = BIT(4),
  10133. + CEC_STAT_ERROR_FOLL = BIT(5),
  10134. + CEC_STAT_WAKEUP = BIT(6),
  10135. +
  10136. + CEC_CTRL_START = BIT(0),
  10137. + CEC_CTRL_NORMAL = 1 << 1,
  10138. +};
  10139. +
  10140. +struct dw_hdmi_cec {
  10141. + struct cec_dev cec;
  10142. +
  10143. + struct device *dev;
  10144. + void __iomem *base;
  10145. + const struct dw_hdmi_cec_ops *ops;
  10146. + void *ops_data;
  10147. + int irq;
  10148. +};
  10149. +
  10150. +static void dw_hdmi_set_address(struct cec_dev *cec_dev, unsigned addresses)
  10151. +{
  10152. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10153. +
  10154. + writeb(addresses & 255, cec->base + HDMI_CEC_ADDR_L);
  10155. + writeb(addresses >> 8, cec->base + HDMI_CEC_ADDR_H);
  10156. +}
  10157. +
  10158. +static void dw_hdmi_send_message(struct cec_dev *cec_dev, u8 *msg,
  10159. + size_t count)
  10160. +{
  10161. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10162. + unsigned i;
  10163. +
  10164. + for (i = 0; i < count; i++)
  10165. + writeb(msg[i], cec->base + HDMI_CEC_TX_DATA0 + i);
  10166. +
  10167. + writeb(count, cec->base + HDMI_CEC_TX_CNT);
  10168. + writeb(CEC_CTRL_NORMAL | CEC_CTRL_START, cec->base + HDMI_CEC_CTRL);
  10169. +}
  10170. +
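+/*
+ * Interrupt handler: acknowledge the latched status, retry a failed
+ * transmission while cec.retries is non-zero (reporting a send error once
+ * it reaches zero), complete the transmit on DONE/NACK, and on EOM copy
+ * the received frame out of the RX registers before unlocking the RX
+ * buffer.
+ */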
  10171. +static irqreturn_t dw_hdmi_cec_irq(int irq, void *data)
  10172. +{
  10173. + struct dw_hdmi_cec *cec = data;
  10174. + struct cec_dev *cec_dev = &cec->cec;
  10175. + unsigned stat = readb(cec->base + HDMI_IH_CEC_STAT0);
  10176. +
  10177. + if (stat == 0)
  10178. + return IRQ_NONE;
  10179. +
  10180. + writeb(stat, cec->base + HDMI_IH_CEC_STAT0);
  10181. +
  10182. + if (stat & CEC_STAT_ERROR_INIT) {
  10183. + if (cec->cec.retries) {
  10184. + unsigned v = readb(cec->base + HDMI_CEC_CTRL);
  10185. + writeb(v | CEC_CTRL_START, cec->base + HDMI_CEC_CTRL);
  10186. + cec->cec.retries -= 1;
  10187. + } else {
  10188. + cec->cec.write_busy = 0;
  10189. + cec_dev_event(cec_dev, MESSAGE_TYPE_SEND_ERROR, NULL, 0);
  10190. + }
  10191. + } else if (stat & (CEC_STAT_DONE | CEC_STAT_NACK))
  10192. + cec_dev_send_complete(cec_dev, stat & CEC_STAT_DONE);
  10193. +
  10194. + if (stat & CEC_STAT_EOM) {
  10195. + unsigned len, i;
  10196. + u8 msg[MAX_MESSAGE_LEN];
  10197. +
  10198. + len = readb(cec->base + HDMI_CEC_RX_CNT);
  10199. + if (len > sizeof(msg))
  10200. + len = sizeof(msg);
  10201. +
  10202. + for (i = 0; i < len; i++)
  10203. + msg[i] = readb(cec->base + HDMI_CEC_RX_DATA0 + i);
  10204. +
  10205. + writeb(0, cec->base + HDMI_CEC_LOCK);
  10206. +
  10207. + cec_dev_receive(cec_dev, msg, len);
  10208. + }
  10209. +
  10210. + return IRQ_HANDLED;
  10211. +}
  10212. +EXPORT_SYMBOL(dw_hdmi_cec_irq);
  10213. +
  10214. +static void dw_hdmi_cec_release(struct cec_dev *cec_dev)
  10215. +{
  10216. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10217. +
  10218. + writeb(~0, cec->base + HDMI_CEC_MASK);
  10219. + writeb(~0, cec->base + HDMI_IH_MUTE_CEC_STAT0);
  10220. + writeb(0, cec->base + HDMI_CEC_POLARITY);
  10221. +
  10222. + free_irq(cec->irq, cec);
  10223. +
  10224. + cec->ops->disable(cec->ops_data);
  10225. +}
  10226. +
  10227. +static int dw_hdmi_cec_open(struct cec_dev *cec_dev)
  10228. +{
  10229. + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec);
  10230. + unsigned irqs;
  10231. + int ret;
  10232. +
  10233. + writeb(0, cec->base + HDMI_CEC_CTRL);
  10234. + writeb(~0, cec->base + HDMI_IH_CEC_STAT0);
  10235. + writeb(0, cec->base + HDMI_CEC_LOCK);
  10236. +
  10237. + ret = request_irq(cec->irq, dw_hdmi_cec_irq, IRQF_SHARED,
  10238. + DEV_NAME, cec);
  10239. + if (ret < 0)
  10240. + return ret;
  10241. +
  10242. + dw_hdmi_set_address(cec_dev, cec_dev->addresses);
  10243. +
  10244. + cec->ops->enable(cec->ops_data);
  10245. +
  10246. + irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
  10247. + CEC_STAT_DONE;
  10248. + writeb(irqs, cec->base + HDMI_CEC_POLARITY);
  10249. + writeb(~irqs, cec->base + HDMI_CEC_MASK);
  10250. + writeb(~irqs, cec->base + HDMI_IH_MUTE_CEC_STAT0);
  10251. +
  10252. + return 0;
  10253. +}
  10254. +
  10255. +static int dw_hdmi_cec_probe(struct platform_device *pdev)
  10256. +{
  10257. + struct dw_hdmi_cec_data *data = dev_get_platdata(&pdev->dev);
  10258. + struct dw_hdmi_cec *cec;
  10259. +
  10260. + if (!data)
  10261. + return -ENXIO;
  10262. +
  10263. + cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
  10264. + if (!cec)
  10265. + return -ENOMEM;
  10266. +
  10267. + cec->dev = &pdev->dev;
  10268. + cec->base = data->base;
  10269. + cec->irq = data->irq;
  10270. + cec->ops = data->ops;
  10271. + cec->ops_data = data->ops_data;
  10272. + cec->cec.open = dw_hdmi_cec_open;
  10273. + cec->cec.release = dw_hdmi_cec_release;
  10274. + cec->cec.send_message = dw_hdmi_send_message;
  10275. + cec->cec.set_address = dw_hdmi_set_address;
  10276. +
  10277. + cec_dev_init(&cec->cec, THIS_MODULE);
  10278. +
  10279. + /* FIXME: soft-reset the CEC interface */
  10280. +
  10281. + dw_hdmi_set_address(&cec->cec, cec->cec.addresses);
  10282. + writeb(0, cec->base + HDMI_CEC_TX_CNT);
  10283. + writeb(~0, cec->base + HDMI_CEC_MASK);
  10284. + writeb(~0, cec->base + HDMI_IH_MUTE_CEC_STAT0);
  10285. + writeb(0, cec->base + HDMI_CEC_POLARITY);
  10286. +
  10287. + /*
  10288. + * Our device is just a convenience - we want to link to the real
  10289. + * hardware device here, so that userspace can see the association
  10290. + * between the HDMI hardware and its associated CEC chardev.
  10291. + */
  10292. + return cec_dev_add(&cec->cec, cec->dev->parent, DEV_NAME);
  10293. +}
  10294. +
  10295. +static int dw_hdmi_cec_remove(struct platform_device *pdev)
  10296. +{
  10297. + struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
  10298. +
  10299. + cec_dev_remove(&cec->cec);
  10300. +
  10301. + return 0;
  10302. +}
  10303. +
  10304. +static struct platform_driver dw_hdmi_cec_driver = {
  10305. + .probe = dw_hdmi_cec_probe,
  10306. + .remove = dw_hdmi_cec_remove,
  10307. + .driver = {
  10308. + .name = "dw-hdmi-cec",
  10309. + .owner = THIS_MODULE,
  10310. + },
  10311. +};
  10312. +module_platform_driver(dw_hdmi_cec_driver);
  10313. +
  10314. +MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  10315. +MODULE_DESCRIPTION("Synopsis Designware HDMI CEC driver for i.MX");
  10316. +MODULE_LICENSE("GPL");
  10317. +MODULE_ALIAS(PLATFORM_MODULE_PREFIX "dw-hdmi-cec");
  10318. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-cec.h linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-cec.h
  10319. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/dw-hdmi-cec.h 1970-01-01 01:00:00.000000000 +0100
  10320. +++ linux-3.15-rc6/drivers/staging/imx-drm/dw-hdmi-cec.h 2014-05-23 11:26:48.316940071 +0200
  10321. @@ -0,0 +1,16 @@
  10322. +#ifndef DW_HDMI_CEC_H
  10323. +#define DW_HDMI_CEC_H
  10324. +
  10325. +struct dw_hdmi_cec_ops {
  10326. + void (*enable)(void *);
  10327. + void (*disable)(void *);
  10328. +};
  10329. +
  10330. +struct dw_hdmi_cec_data {
  10331. + void __iomem *base;
  10332. + int irq;
  10333. + const struct dw_hdmi_cec_ops *ops;
  10334. + void *ops_data;
  10335. +};
  10336. +
  10337. +#endif
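A minimal sketch of how a parent driver is expected to feed this platform data to the cell — it mirrors what the imx-hdmi.c hunk below does; the callback and variable names here are illustrative only, not part of the patch:

	static const struct dw_hdmi_cec_ops my_cec_ops = {
		.enable  = my_cec_clk_enable,	/* gate the CEC clock on */
		.disable = my_cec_clk_disable,	/* and back off */
	};

	struct dw_hdmi_cec_data cec = {
		.base     = regs,		/* shared DW-HDMI register window */
		.irq      = irq,		/* shared DW-HDMI interrupt */
		.ops      = &my_cec_ops,
		.ops_data = parent_private,
	};
	struct platform_device_info pdevinfo = {
		.parent    = dev,
		.id        = PLATFORM_DEVID_AUTO,
		.name      = "dw-hdmi-cec",	/* binds to dw_hdmi_cec_driver above */
		.data      = &cec,
		.size_data = sizeof(cec),
	};

	cec_pdev = platform_device_register_full(&pdevinfo);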
  10338. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/imx-hdmi.c linux-3.15-rc6/drivers/staging/imx-drm/imx-hdmi.c
  10339. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/imx-hdmi.c 2014-05-21 23:42:02.000000000 +0200
  10340. +++ linux-3.15-rc6/drivers/staging/imx-drm/imx-hdmi.c 2014-05-23 11:26:48.316940071 +0200
  10341. @@ -28,6 +28,9 @@
  10342. #include <drm/drm_edid.h>
  10343. #include <drm/drm_encoder_slave.h>
  10344. +#include "drm-ddc-connector.h"
  10345. +#include "dw-hdmi-audio.h"
  10346. +#include "dw-hdmi-cec.h"
  10347. #include "ipu-v3/imx-ipu-v3.h"
  10348. #include "imx-hdmi.h"
  10349. #include "imx-drm.h"
  10350. @@ -112,27 +115,27 @@
  10351. };
  10352. struct imx_hdmi {
  10353. - struct drm_connector connector;
  10354. + struct drm_ddc_connector *ddc_conn;
  10355. struct drm_encoder encoder;
  10356. + struct platform_device *audio;
  10357. + struct platform_device *cec;
  10358. enum imx_hdmi_devtype dev_type;
  10359. struct device *dev;
  10360. struct clk *isfr_clk;
  10361. struct clk *iahb_clk;
  10362. - enum drm_connector_status connector_status;
  10363. -
  10364. struct hdmi_data_info hdmi_data;
  10365. int vic;
  10366. u8 edid[HDMI_EDID_LEN];
  10367. + u8 mc_clkdis;
  10368. bool cable_plugin;
  10369. bool phy_enabled;
  10370. struct drm_display_mode previous_mode;
  10371. struct regmap *regmap;
  10372. - struct i2c_adapter *ddc;
  10373. void __iomem *regs;
  10374. unsigned int sample_rate;
  10375. @@ -362,6 +365,12 @@
  10376. hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
  10377. }
  10378. +static void imx_hdmi_set_sample_rate(struct imx_hdmi *hdmi, unsigned rate)
  10379. +{
  10380. + hdmi->sample_rate = rate;
  10381. + hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
  10382. +}
  10383. +
  10384. /*
  10385. * this submodule is responsible for the video data synchronization.
  10386. * for example, for RGB 4:4:4 input, the data map is defined as
  10387. @@ -1148,8 +1157,6 @@
  10388. /* HDMI Initialization Step B.4 */
  10389. static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi)
  10390. {
  10391. - u8 clkdis;
  10392. -
  10393. /* control period minimum duration */
  10394. hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR);
  10395. hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR);
  10396. @@ -1161,23 +1168,28 @@
  10397. hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM);
  10398. /* Enable pixel clock and tmds data path */
  10399. - clkdis = 0x7F;
  10400. - clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
  10401. - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
  10402. + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE |
  10403. + HDMI_MC_CLKDIS_CSCCLK_DISABLE |
  10404. + HDMI_MC_CLKDIS_AUDCLK_DISABLE |
  10405. + HDMI_MC_CLKDIS_PREPCLK_DISABLE |
  10406. + HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
  10407. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
  10408. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10409. - clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
  10410. - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
  10411. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
  10412. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10413. /* Enable csc path */
  10414. if (is_color_space_conversion(hdmi)) {
  10415. - clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
  10416. - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
  10417. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
  10418. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10419. }
  10420. }
  10421. static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi)
  10422. {
  10423. - hdmi_modb(hdmi, 0, HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
  10424. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
  10425. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10426. }
  10427. /* Workaround to clear the overflow condition */
  10428. @@ -1380,41 +1392,16 @@
  10429. static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
  10430. *connector, bool force)
  10431. {
  10432. - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
  10433. - connector);
  10434. - return hdmi->connector_status;
  10435. -}
  10436. -
  10437. -static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
  10438. -{
  10439. - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
  10440. - connector);
  10441. - struct edid *edid;
  10442. - int ret;
  10443. + struct imx_hdmi *hdmi = drm_ddc_private(connector);
  10444. - if (!hdmi->ddc)
  10445. - return 0;
  10446. -
  10447. - edid = drm_get_edid(connector, hdmi->ddc);
  10448. - if (edid) {
  10449. - dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
  10450. - edid->width_cm, edid->height_cm);
  10451. -
  10452. - drm_mode_connector_update_edid_property(connector, edid);
  10453. - ret = drm_add_edid_modes(connector, edid);
  10454. - kfree(edid);
  10455. - } else {
  10456. - dev_dbg(hdmi->dev, "failed to get edid\n");
  10457. - }
  10458. -
  10459. - return 0;
  10460. + return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
  10461. + connector_status_connected : connector_status_disconnected;
  10462. }
  10463. static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector
  10464. *connector)
  10465. {
  10466. - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
  10467. - connector);
  10468. + struct imx_hdmi *hdmi = drm_ddc_private(connector);
  10469. return &hdmi->encoder;
  10470. }
  10471. @@ -1483,15 +1470,8 @@
  10472. .disable = imx_hdmi_encoder_disable,
  10473. };
  10474. -static struct drm_connector_funcs imx_hdmi_connector_funcs = {
  10475. - .dpms = drm_helper_connector_dpms,
  10476. - .fill_modes = drm_helper_probe_single_connector_modes,
  10477. - .detect = imx_hdmi_connector_detect,
  10478. - .destroy = imx_drm_connector_destroy,
  10479. -};
  10480. -
  10481. static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = {
  10482. - .get_modes = imx_hdmi_connector_get_modes,
  10483. + .get_modes = drm_ddc_connector_get_modes,
  10484. .mode_valid = imx_drm_connector_mode_valid,
  10485. .best_encoder = imx_hdmi_connector_best_encoder,
  10486. };
  10487. @@ -1524,7 +1504,6 @@
  10488. hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
  10489. - hdmi->connector_status = connector_status_connected;
  10490. imx_hdmi_poweron(hdmi);
  10491. } else {
  10492. dev_dbg(hdmi->dev, "EVENT=plugout\n");
  10493. @@ -1532,10 +1511,9 @@
  10494. hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
  10495. HDMI_PHY_POL0);
  10496. - hdmi->connector_status = connector_status_disconnected;
  10497. imx_hdmi_poweroff(hdmi);
  10498. }
  10499. - drm_helper_hpd_irq_event(hdmi->connector.dev);
  10500. + drm_helper_hpd_irq_event(hdmi->ddc_conn->connector.dev);
  10501. }
  10502. hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
  10503. @@ -1553,24 +1531,42 @@
  10504. if (ret)
  10505. return ret;
  10506. - hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
  10507. + hdmi->ddc_conn->connector.polled = DRM_CONNECTOR_POLL_HPD;
  10508. drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs);
  10509. drm_encoder_init(drm, &hdmi->encoder, &imx_hdmi_encoder_funcs,
  10510. DRM_MODE_ENCODER_TMDS);
  10511. - drm_connector_helper_add(&hdmi->connector,
  10512. + drm_connector_helper_add(&hdmi->ddc_conn->connector,
  10513. &imx_hdmi_connector_helper_funcs);
  10514. - drm_connector_init(drm, &hdmi->connector, &imx_hdmi_connector_funcs,
  10515. - DRM_MODE_CONNECTOR_HDMIA);
  10516. -
  10517. - hdmi->connector.encoder = &hdmi->encoder;
  10518. + drm_ddc_connector_add(drm, hdmi->ddc_conn, DRM_MODE_CONNECTOR_HDMIA);
  10519. - drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
  10520. + drm_mode_connector_attach_encoder(&hdmi->ddc_conn->connector, &hdmi->encoder);
  10521. return 0;
  10522. }
  10523. +static void imx_hdmi_cec_enable(void *data)
  10524. +{
  10525. + struct imx_hdmi *hdmi = data;
  10526. +
  10527. + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
  10528. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10529. +}
  10530. +
  10531. +static void imx_hdmi_cec_disable(void *data)
  10532. +{
  10533. + struct imx_hdmi *hdmi = data;
  10534. +
  10535. + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
  10536. + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
  10537. +}
  10538. +
  10539. +static const struct dw_hdmi_cec_ops imx_hdmi_cec_ops = {
  10540. + .enable = imx_hdmi_cec_enable,
  10541. + .disable = imx_hdmi_cec_disable,
  10542. +};
  10543. +
  10544. static struct platform_device_id imx_hdmi_devtype[] = {
  10545. {
  10546. .name = "imx6q-hdmi",
  10547. @@ -1592,11 +1588,13 @@
  10548. static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
  10549. {
  10550. struct platform_device *pdev = to_platform_device(dev);
  10551. + struct platform_device_info pdevinfo;
  10552. const struct of_device_id *of_id =
  10553. of_match_device(imx_hdmi_dt_ids, dev);
  10554. struct drm_device *drm = data;
  10555. struct device_node *np = dev->of_node;
  10556. - struct device_node *ddc_node;
  10557. + struct dw_hdmi_audio_data audio;
  10558. + struct dw_hdmi_cec_data cec;
  10559. struct imx_hdmi *hdmi;
  10560. struct resource *iores;
  10561. int ret, irq;
  10562. @@ -1605,27 +1603,22 @@
  10563. if (!hdmi)
  10564. return -ENOMEM;
  10565. + hdmi->ddc_conn = drm_ddc_connector_create(drm, np, hdmi);
  10566. + if (IS_ERR(hdmi->ddc_conn))
  10567. + return PTR_ERR(hdmi->ddc_conn);
  10568. +
  10569. + hdmi->ddc_conn->detect = imx_hdmi_connector_detect;
  10570. +
  10571. hdmi->dev = dev;
  10572. - hdmi->connector_status = connector_status_disconnected;
  10573. hdmi->sample_rate = 48000;
  10574. hdmi->ratio = 100;
  10575. + hdmi->mc_clkdis = 0x7f;
  10576. if (of_id) {
  10577. const struct platform_device_id *device_id = of_id->data;
  10578. hdmi->dev_type = device_id->driver_data;
  10579. }
  10580. - ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
  10581. - if (ddc_node) {
  10582. - hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
  10583. - if (!hdmi->ddc)
  10584. - dev_dbg(hdmi->dev, "failed to read ddc node\n");
  10585. -
  10586. - of_node_put(ddc_node);
  10587. - } else {
  10588. - dev_dbg(hdmi->dev, "no ddc property found\n");
  10589. - }
  10590. -
  10591. irq = platform_get_irq(pdev, 0);
  10592. if (irq < 0)
  10593. return -EINVAL;
  10594. @@ -1711,6 +1704,35 @@
  10595. /* Unmute interrupts */
  10596. hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
  10597. + memset(&pdevinfo, 0, sizeof(pdevinfo));
  10598. + pdevinfo.parent = dev;
  10599. + pdevinfo.id = PLATFORM_DEVID_AUTO;
  10600. +
  10601. + audio.phys = iores->start;
  10602. + audio.base = hdmi->regs;
  10603. + audio.irq = irq;
  10604. + audio.hdmi = hdmi;
  10605. + audio.eld = hdmi->ddc_conn->connector.eld;
  10606. + audio.set_sample_rate = imx_hdmi_set_sample_rate;
  10607. +
  10608. + pdevinfo.name = "dw-hdmi-audio";
  10609. + pdevinfo.data = &audio;
  10610. + pdevinfo.size_data = sizeof(audio);
  10611. + pdevinfo.dma_mask = DMA_BIT_MASK(32);
  10612. + hdmi->audio = platform_device_register_full(&pdevinfo);
  10613. +
  10614. + cec.base = hdmi->regs;
  10615. + cec.irq = irq;
  10616. + cec.ops = &imx_hdmi_cec_ops;
  10617. + cec.ops_data = hdmi;
  10618. +
  10619. + pdevinfo.name = "dw-hdmi-cec";
  10620. + pdevinfo.data = &cec;
  10621. + pdevinfo.size_data = sizeof(cec);
  10622. + pdevinfo.dma_mask = 0;
  10623. +
  10624. + hdmi->cec = platform_device_register_full(&pdevinfo);
  10625. +
  10626. dev_set_drvdata(dev, hdmi);
  10627. return 0;
  10628. @@ -1728,15 +1750,19 @@
  10629. {
  10630. struct imx_hdmi *hdmi = dev_get_drvdata(dev);
  10631. + if (!IS_ERR(hdmi->audio))
  10632. + platform_device_unregister(hdmi->audio);
  10633. + if (!IS_ERR(hdmi->cec))
  10634. + platform_device_unregister(hdmi->cec);
  10635. +
  10636. /* Disable all interrupts */
  10637. hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
  10638. - hdmi->connector.funcs->destroy(&hdmi->connector);
  10639. + hdmi->ddc_conn->connector.funcs->destroy(&hdmi->ddc_conn->connector);
  10640. hdmi->encoder.funcs->destroy(&hdmi->encoder);
  10641. clk_disable_unprepare(hdmi->iahb_clk);
  10642. clk_disable_unprepare(hdmi->isfr_clk);
  10643. - i2c_put_adapter(hdmi->ddc);
  10644. }
  10645. static const struct component_ops hdmi_ops = {
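All of the hdmi->mc_clkdis updates above follow one pattern: the driver keeps a shadow of HDMI_MC_CLKDIS (initialised to its 0x7f reset value in imx_hdmi_bind) so the video, audio and CEC paths can each clear or set their own clock-disable bit while preserving the others, instead of read-modify-writing the register. A condensed sketch of that pattern — the helper is illustrative, not part of the patch:

	static void imx_hdmi_mc_clkdis_update(struct imx_hdmi *hdmi, u8 clear, u8 set)
	{
		hdmi->mc_clkdis &= ~clear;
		hdmi->mc_clkdis |= set;
		hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
	}

	/* the CEC ops above would then reduce to: */
	imx_hdmi_mc_clkdis_update(hdmi, HDMI_MC_CLKDIS_CECCLK_DISABLE, 0);	/* enable  */
	imx_hdmi_mc_clkdis_update(hdmi, 0, HDMI_MC_CLKDIS_CECCLK_DISABLE);	/* disable */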
  10646. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/imx-ldb.c linux-3.15-rc6/drivers/staging/imx-drm/imx-ldb.c
  10647. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/imx-ldb.c 2014-05-21 23:42:02.000000000 +0200
  10648. +++ linux-3.15-rc6/drivers/staging/imx-drm/imx-ldb.c 2014-05-23 11:26:48.316940071 +0200
  10649. @@ -24,6 +24,7 @@
  10650. #include <drm/drmP.h>
  10651. #include <drm/drm_fb_helper.h>
  10652. #include <drm/drm_crtc_helper.h>
  10653. +#include <drm/drm_panel.h>
  10654. #include <linux/mfd/syscon.h>
  10655. #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
  10656. #include <linux/of_address.h>
  10657. @@ -60,6 +61,7 @@
  10658. struct imx_ldb *ldb;
  10659. struct drm_connector connector;
  10660. struct drm_encoder encoder;
  10661. + struct drm_panel *panel;
  10662. struct device_node *child;
  10663. int chno;
  10664. void *edid;
  10665. @@ -96,6 +98,13 @@
  10666. struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
  10667. int num_modes = 0;
  10668. + if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
  10669. + imx_ldb_ch->panel->funcs->get_modes) {
  10670. + num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
  10671. + if (num_modes > 0)
  10672. + return num_modes;
  10673. + }
  10674. +
  10675. if (imx_ldb_ch->edid) {
  10676. drm_mode_connector_update_edid_property(connector,
  10677. imx_ldb_ch->edid);
  10678. @@ -243,6 +252,8 @@
  10679. }
  10680. regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
  10681. +
  10682. + drm_panel_enable(imx_ldb_ch->panel);
  10683. }
  10684. static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
  10685. @@ -294,6 +305,8 @@
  10686. (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
  10687. return;
  10688. + drm_panel_disable(imx_ldb_ch->panel);
  10689. +
  10690. if (imx_ldb_ch == &ldb->channel[0])
  10691. ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
  10692. else if (imx_ldb_ch == &ldb->channel[1])
  10693. @@ -379,6 +392,9 @@
  10694. drm_connector_init(drm, &imx_ldb_ch->connector,
  10695. &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
  10696. + if (imx_ldb_ch->panel)
  10697. + drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
  10698. +
  10699. drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
  10700. &imx_ldb_ch->encoder);
  10701. @@ -493,6 +509,7 @@
  10702. for_each_child_of_node(np, child) {
  10703. struct imx_ldb_channel *channel;
  10704. + struct device_node *panel_node;
  10705. ret = of_property_read_u32(child, "reg", &i);
  10706. if (ret || i < 0 || i > 1)
  10707. @@ -556,6 +573,10 @@
  10708. return -EINVAL;
  10709. }
  10710. + panel_node = of_parse_phandle(child, "fsl,panel", 0);
  10711. + if (panel_node)
  10712. + channel->panel = of_drm_find_panel(panel_node);
  10713. +
  10714. ret = imx_ldb_register(drm, channel);
  10715. if (ret)
  10716. return ret;
  10717. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/imx-tve.c linux-3.15-rc6/drivers/staging/imx-drm/imx-tve.c
  10718. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/imx-tve.c 2014-05-21 23:42:02.000000000 +0200
  10719. +++ linux-3.15-rc6/drivers/staging/imx-drm/imx-tve.c 2014-05-23 11:28:50.689342803 +0200
  10720. @@ -22,7 +22,6 @@
  10721. #include <linux/clk-provider.h>
  10722. #include <linux/component.h>
  10723. #include <linux/module.h>
  10724. -#include <linux/i2c.h>
  10725. #include <linux/regmap.h>
  10726. #include <linux/regulator/consumer.h>
  10727. #include <linux/spinlock.h>
  10728. @@ -31,6 +30,7 @@
  10729. #include <drm/drm_fb_helper.h>
  10730. #include <drm/drm_crtc_helper.h>
  10731. +#include "drm-ddc-connector.h"
  10732. #include "ipu-v3/imx-ipu-v3.h"
  10733. #include "imx-drm.h"
  10734. @@ -111,7 +111,7 @@
  10735. };
  10736. struct imx_tve {
  10737. - struct drm_connector connector;
  10738. + struct drm_ddc_connector *ddc_conn;
  10739. struct drm_encoder encoder;
  10740. struct device *dev;
  10741. spinlock_t lock; /* register lock */
  10742. @@ -120,7 +120,6 @@
  10743. struct regmap *regmap;
  10744. struct regulator *dac_reg;
  10745. - struct i2c_adapter *ddc;
  10746. struct clk *clk;
  10747. struct clk *di_sel_clk;
  10748. struct clk_hw clk_hw_di;
  10749. @@ -219,35 +218,10 @@
  10750. return 0;
  10751. }
  10752. -static enum drm_connector_status imx_tve_connector_detect(
  10753. - struct drm_connector *connector, bool force)
  10754. -{
  10755. - return connector_status_connected;
  10756. -}
  10757. -
  10758. -static int imx_tve_connector_get_modes(struct drm_connector *connector)
  10759. -{
  10760. - struct imx_tve *tve = con_to_tve(connector);
  10761. - struct edid *edid;
  10762. - int ret = 0;
  10763. -
  10764. - if (!tve->ddc)
  10765. - return 0;
  10766. -
  10767. - edid = drm_get_edid(connector, tve->ddc);
  10768. - if (edid) {
  10769. - drm_mode_connector_update_edid_property(connector, edid);
  10770. - ret = drm_add_edid_modes(connector, edid);
  10771. - kfree(edid);
  10772. - }
  10773. -
  10774. - return ret;
  10775. -}
  10776. -
  10777. static int imx_tve_connector_mode_valid(struct drm_connector *connector,
  10778. struct drm_display_mode *mode)
  10779. {
  10780. - struct imx_tve *tve = con_to_tve(connector);
  10781. + struct imx_tve *tve = to_ddc_conn(connector)->private;
  10782. unsigned long rate;
  10783. int ret;
  10784. @@ -274,7 +248,7 @@
  10785. static struct drm_encoder *imx_tve_connector_best_encoder(
  10786. struct drm_connector *connector)
  10787. {
  10788. - struct imx_tve *tve = con_to_tve(connector);
  10789. + struct imx_tve *tve = drm_ddc_private(connector);
  10790. return &tve->encoder;
  10791. }
  10792. @@ -362,15 +336,8 @@
  10793. tve_disable(tve);
  10794. }
  10795. -static struct drm_connector_funcs imx_tve_connector_funcs = {
  10796. - .dpms = drm_helper_connector_dpms,
  10797. - .fill_modes = drm_helper_probe_single_connector_modes,
  10798. - .detect = imx_tve_connector_detect,
  10799. - .destroy = imx_drm_connector_destroy,
  10800. -};
  10801. -
  10802. static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
  10803. - .get_modes = imx_tve_connector_get_modes,
  10804. + .get_modes = drm_ddc_connector_get_modes,
  10805. .best_encoder = imx_tve_connector_best_encoder,
  10806. .mode_valid = imx_tve_connector_mode_valid,
  10807. };
  10808. @@ -513,12 +480,11 @@
  10809. drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
  10810. encoder_type);
  10811. - drm_connector_helper_add(&tve->connector,
  10812. + drm_connector_helper_add(&tve->ddc_conn->connector,
  10813. &imx_tve_connector_helper_funcs);
  10814. - drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
  10815. - DRM_MODE_CONNECTOR_VGA);
  10816. + drm_ddc_connector_add(drm, tve->ddc_conn, DRM_MODE_CONNECTOR_VGA);
  10817. - drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
  10818. + drm_mode_connector_attach_encoder(&tve->ddc_conn->connector, &tve->encoder);
  10819. return 0;
  10820. }
  10821. @@ -567,7 +533,6 @@
  10822. struct platform_device *pdev = to_platform_device(dev);
  10823. struct drm_device *drm = data;
  10824. struct device_node *np = dev->of_node;
  10825. - struct device_node *ddc_node;
  10826. struct imx_tve *tve;
  10827. struct resource *res;
  10828. void __iomem *base;
  10829. @@ -579,15 +544,13 @@
  10830. if (!tve)
  10831. return -ENOMEM;
  10832. + tve->ddc_conn = drm_ddc_connector_create(drm, np, tve);
  10833. + if (IS_ERR(tve->ddc_conn))
  10834. + return PTR_ERR(tve->ddc_conn);
  10835. +
  10836. tve->dev = dev;
  10837. spin_lock_init(&tve->lock);
  10838. - ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
  10839. - if (ddc_node) {
  10840. - tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
  10841. - of_node_put(ddc_node);
  10842. - }
  10843. -
  10844. tve->mode = of_get_tve_mode(np);
  10845. if (tve->mode != TVE_MODE_VGA) {
  10846. dev_err(dev, "only VGA mode supported, currently\n");
  10847. @@ -694,7 +657,7 @@
  10848. {
  10849. struct imx_tve *tve = dev_get_drvdata(dev);
  10850. - tve->connector.funcs->destroy(&tve->connector);
  10851. + tve->ddc_conn->connector.funcs->destroy(&tve->ddc_conn->connector);
  10852. tve->encoder.funcs->destroy(&tve->encoder);
  10853. if (!IS_ERR(tve->dac_reg))
  10854. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h
  10855. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h 2014-05-21 23:42:02.000000000 +0200
  10856. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h 2014-05-23 11:26:48.316940071 +0200
  10857. @@ -76,6 +76,7 @@
  10858. IPU_IRQ_EOS = 192,
  10859. };
  10860. +int ipu_map_irq(struct ipu_soc *ipu, int irq);
  10861. int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
  10862. enum ipu_channel_irq irq);
  10863. @@ -114,8 +115,10 @@
  10864. void ipu_dc_put(struct ipu_dc *dc);
  10865. int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
  10866. u32 pixel_fmt, u32 width);
  10867. +void ipu_dc_enable(struct ipu_soc *ipu);
  10868. void ipu_dc_enable_channel(struct ipu_dc *dc);
  10869. void ipu_dc_disable_channel(struct ipu_dc *dc);
  10870. +void ipu_dc_disable(struct ipu_soc *ipu);
  10871. /*
  10872. * IPU Display Interface (di) functions
  10873. @@ -152,8 +155,10 @@
  10874. struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow);
  10875. void ipu_dp_put(struct ipu_dp *);
  10876. +int ipu_dp_enable(struct ipu_soc *ipu);
  10877. int ipu_dp_enable_channel(struct ipu_dp *dp);
  10878. void ipu_dp_disable_channel(struct ipu_dp *dp);
  10879. +void ipu_dp_disable(struct ipu_soc *ipu);
  10880. int ipu_dp_setup_channel(struct ipu_dp *dp,
  10881. enum ipu_color_space in, enum ipu_color_space out);
  10882. int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos);
  10883. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-common.c linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-common.c
  10884. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-common.c 2014-05-21 23:42:02.000000000 +0200
  10885. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-common.c 2014-05-23 11:26:48.352940189 +0200
  10886. @@ -697,6 +697,12 @@
  10887. }
  10888. EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
  10889. +bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
  10890. +{
  10891. + return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
  10892. +}
  10893. +EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
  10894. +
  10895. int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
  10896. {
  10897. struct ipu_soc *ipu = channel->ipu;
  10898. @@ -714,6 +720,22 @@
  10899. }
  10900. EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
  10901. +int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
  10902. +{
  10903. + unsigned long timeout;
  10904. +
  10905. + timeout = jiffies + msecs_to_jiffies(ms);
  10906. + ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
  10907. + while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
  10908. + if (time_after(jiffies, timeout))
  10909. + return -ETIMEDOUT;
  10910. + cpu_relax();
  10911. + }
  10912. +
  10913. + return 0;
  10914. +}
  10915. +EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
  10916. +
  10917. int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
  10918. {
  10919. struct ipu_soc *ipu = channel->ipu;
  10920. @@ -933,15 +955,22 @@
  10921. chained_irq_exit(chip, desc);
  10922. }
  10923. -int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
  10924. - enum ipu_channel_irq irq_type)
  10925. +int ipu_map_irq(struct ipu_soc *ipu, int irq)
  10926. {
  10927. - int irq = irq_linear_revmap(ipu->domain, irq_type + channel->num);
  10928. + int virq;
  10929. - if (!irq)
  10930. - irq = irq_create_mapping(ipu->domain, irq_type + channel->num);
  10931. + virq = irq_linear_revmap(ipu->domain, irq);
  10932. + if (!virq)
  10933. + virq = irq_create_mapping(ipu->domain, irq);
  10934. - return irq;
  10935. + return virq;
  10936. +}
  10937. +EXPORT_SYMBOL_GPL(ipu_map_irq);
  10938. +
  10939. +int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
  10940. + enum ipu_channel_irq irq_type)
  10941. +{
  10942. + return ipu_map_irq(ipu, irq_type + channel->num);
  10943. }
  10944. EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
  10945. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-dc.c linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
  10946. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-dc.c 2014-05-21 23:42:02.000000000 +0200
  10947. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-dc.c 2014-05-23 11:26:48.360940215 +0200
  10948. @@ -18,6 +18,7 @@
  10949. #include <linux/types.h>
  10950. #include <linux/errno.h>
  10951. #include <linux/delay.h>
  10952. +#include <linux/interrupt.h>
  10953. #include <linux/io.h>
  10954. #include "../imx-drm.h"
  10955. @@ -92,6 +93,7 @@
  10956. IPU_DC_MAP_GBR24, /* TVEv2 */
  10957. IPU_DC_MAP_BGR666,
  10958. IPU_DC_MAP_BGR24,
  10959. + IPU_DC_MAP_RGB666,
  10960. };
  10961. struct ipu_dc {
  10962. @@ -110,6 +112,9 @@
  10963. struct device *dev;
  10964. struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
  10965. struct mutex mutex;
  10966. + struct completion comp;
  10967. + int dc_irq;
  10968. + int dp_irq;
  10969. };
  10970. static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
  10971. @@ -155,6 +160,8 @@
  10972. return IPU_DC_MAP_BGR666;
  10973. case V4L2_PIX_FMT_BGR24:
  10974. return IPU_DC_MAP_BGR24;
  10975. + case V4L2_PIX_FMT_RGB666:
  10976. + return IPU_DC_MAP_RGB666;
  10977. default:
  10978. return -EINVAL;
  10979. }
  10980. @@ -220,12 +227,16 @@
  10981. writel(0x0, dc->base + DC_WR_CH_ADDR);
  10982. writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
  10983. - ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
  10984. -
  10985. return 0;
  10986. }
  10987. EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
  10988. +void ipu_dc_enable(struct ipu_soc *ipu)
  10989. +{
  10990. + ipu_module_enable(ipu, IPU_CONF_DC_EN);
  10991. +}
  10992. +EXPORT_SYMBOL_GPL(ipu_dc_enable);
  10993. +
  10994. void ipu_dc_enable_channel(struct ipu_dc *dc)
  10995. {
  10996. int di;
  10997. @@ -239,41 +250,55 @@
  10998. }
  10999. EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
  11000. +static irqreturn_t dc_irq_handler(int irq, void *dev_id)
  11001. +{
  11002. + struct ipu_dc *dc = dev_id;
  11003. + u32 reg;
  11004. +
  11005. + reg = readl(dc->base + DC_WR_CH_CONF);
  11006. + reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
  11007. + writel(reg, dc->base + DC_WR_CH_CONF);
  11008. +
  11009. + /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */
  11010. +
  11011. + complete(&dc->priv->comp);
  11012. + return IRQ_HANDLED;
  11013. +}
  11014. +
  11015. void ipu_dc_disable_channel(struct ipu_dc *dc)
  11016. {
  11017. struct ipu_dc_priv *priv = dc->priv;
  11018. + int irq, ret;
  11019. u32 val;
  11020. - int irq = 0, timeout = 50;
  11021. + /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
  11022. if (dc->chno == 1)
  11023. - irq = IPU_IRQ_DC_FC_1;
  11024. + irq = priv->dc_irq;
  11025. else if (dc->chno == 5)
  11026. - irq = IPU_IRQ_DP_SF_END;
  11027. + irq = priv->dp_irq;
  11028. else
  11029. return;
  11030. - /* should wait for the interrupt here */
  11031. - mdelay(50);
  11032. -
  11033. - if (dc->di == 0)
  11034. - val = 0x00000002;
  11035. - else
  11036. - val = 0x00000020;
  11037. -
  11038. - /* Wait for DC triple buffer to empty */
  11039. - while ((readl(priv->dc_reg + DC_STAT) & val) != val) {
  11040. - usleep_range(2000, 20000);
  11041. - timeout -= 2;
  11042. - if (timeout <= 0)
  11043. - break;
  11044. + init_completion(&priv->comp);
  11045. + enable_irq(irq);
  11046. + ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
  11047. + disable_irq(irq);
  11048. + if (ret <= 0) {
  11049. + dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
  11050. +
  11051. + val = readl(dc->base + DC_WR_CH_CONF);
  11052. + val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
  11053. + writel(val, dc->base + DC_WR_CH_CONF);
  11054. }
  11055. -
  11056. - val = readl(dc->base + DC_WR_CH_CONF);
  11057. - val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
  11058. - writel(val, dc->base + DC_WR_CH_CONF);
  11059. }
  11060. EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
  11061. +void ipu_dc_disable(struct ipu_soc *ipu)
  11062. +{
  11063. + ipu_module_disable(ipu, IPU_CONF_DC_EN);
  11064. +}
  11065. +EXPORT_SYMBOL_GPL(ipu_dc_disable);
  11066. +
  11067. static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
  11068. int byte_num, int offset, int mask)
  11069. {
  11070. @@ -340,7 +365,7 @@
  11071. struct ipu_dc_priv *priv;
  11072. static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
  11073. 0x78, 0, 0x94, 0xb4};
  11074. - int i;
  11075. + int i, ret;
  11076. priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  11077. if (!priv)
  11078. @@ -361,6 +386,23 @@
  11079. priv->channels[i].base = priv->dc_reg + channel_offsets[i];
  11080. }
  11081. + priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1);
  11082. + if (!priv->dc_irq)
  11083. + return -EINVAL;
  11084. + ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
  11085. + &priv->channels[1]);
  11086. + if (ret < 0)
  11087. + return ret;
  11088. + disable_irq(priv->dc_irq);
  11089. + priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END);
  11090. + if (!priv->dp_irq)
  11091. + return -EINVAL;
  11092. + ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL,
  11093. + &priv->channels[5]);
  11094. + if (ret < 0)
  11095. + return ret;
  11096. + disable_irq(priv->dp_irq);
  11097. +
  11098. writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
  11099. DC_WR_CH_CONF_PROG_DI_ID,
  11100. priv->channels[1].base + DC_WR_CH_CONF);
  11101. @@ -404,6 +446,12 @@
  11102. ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
  11103. ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
  11104. + /* rgb666 */
  11105. + ipu_dc_map_clear(priv, IPU_DC_MAP_RGB666);
  11106. + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 0, 5, 0xfc); /* blue */
  11107. + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 1, 11, 0xfc); /* green */
  11108. + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 2, 17, 0xfc); /* red */
  11109. +
  11110. return 0;
  11111. }
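The two frame-end interrupts above are requested once at init time and immediately disabled again; ipu_dc_disable_channel() only enables the relevant one around its completion wait, so the handler runs exactly when a channel is being stopped. Condensed, with error handling trimmed, the pattern is:

	/* init: claim the IRQ, then keep it masked until it is needed */
	ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
			       &priv->channels[1]);
	if (ret < 0)
		return ret;
	disable_irq(priv->dc_irq);

	/* channel stop: arm the completion and let one interrupt through */
	init_completion(&priv->comp);
	enable_irq(irq);
	ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
	disable_irq(irq);
	if (ret <= 0)
		dev_warn(priv->dev, "DC stop timeout after 50 ms\n");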
  11112. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-di.c linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-di.c
  11113. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-di.c 2014-05-21 23:42:02.000000000 +0200
  11114. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-di.c 2014-05-23 11:26:48.372940255 +0200
  11115. @@ -595,7 +595,7 @@
  11116. }
  11117. }
  11118. - if (!sig->clk_pol)
  11119. + if (sig->clk_pol)
  11120. di_gen |= DI_GEN_POLARITY_DISP_CLK;
  11121. ipu_di_write(di, di_gen, DI_GENERAL);
  11122. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
  11123. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c 2014-05-21 23:42:02.000000000 +0200
  11124. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c 2014-05-23 11:26:48.380940281 +0200
  11125. @@ -28,7 +28,12 @@
  11126. #define DMFC_GENERAL1 0x0014
  11127. #define DMFC_GENERAL2 0x0018
  11128. #define DMFC_IC_CTRL 0x001c
  11129. -#define DMFC_STAT 0x0020
  11130. +#define DMFC_WR_CHAN_ALT 0x0020
  11131. +#define DMFC_WR_CHAN_DEF_ALT 0x0024
  11132. +#define DMFC_DP_CHAN_ALT 0x0028
  11133. +#define DMFC_DP_CHAN_DEF_ALT 0x002c
  11134. +#define DMFC_GENERAL1_ALT 0x0030
  11135. +#define DMFC_STAT 0x0034
  11136. #define DMFC_WR_CHAN_1_28 0
  11137. #define DMFC_WR_CHAN_2_41 8
  11138. @@ -133,6 +138,20 @@
  11139. }
  11140. EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
  11141. +static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
  11142. +{
  11143. + unsigned long timeout = jiffies + msecs_to_jiffies(1000);
  11144. +
  11145. + while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
  11146. + if (time_after(jiffies, timeout)) {
  11147. + dev_warn(priv->dev,
  11148. + "Timeout waiting for DMFC FIFOs to clear\n");
  11149. + break;
  11150. + }
  11151. + cpu_relax();
  11152. + }
  11153. +}
  11154. +
  11155. void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
  11156. {
  11157. struct ipu_dmfc_priv *priv = dmfc->priv;
  11158. @@ -141,8 +160,10 @@
  11159. priv->use_count--;
  11160. - if (!priv->use_count)
  11161. + if (!priv->use_count) {
  11162. + ipu_dmfc_wait_fifos(priv);
  11163. ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
  11164. + }
  11165. if (priv->use_count < 0)
  11166. priv->use_count = 0;
  11167. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-dp.c linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
  11168. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-dp.c 2014-05-21 23:42:02.000000000 +0200
  11169. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-dp.c 2014-05-23 11:26:48.388940307 +0200
  11170. @@ -215,10 +215,9 @@
  11171. }
  11172. EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
  11173. -int ipu_dp_enable_channel(struct ipu_dp *dp)
  11174. +int ipu_dp_enable(struct ipu_soc *ipu)
  11175. {
  11176. - struct ipu_flow *flow = to_flow(dp);
  11177. - struct ipu_dp_priv *priv = flow->priv;
  11178. + struct ipu_dp_priv *priv = ipu->dp_priv;
  11179. mutex_lock(&priv->mutex);
  11180. @@ -227,15 +226,28 @@
  11181. priv->use_count++;
  11182. - if (dp->foreground) {
  11183. - u32 reg;
  11184. + mutex_unlock(&priv->mutex);
  11185. +
  11186. + return 0;
  11187. +}
  11188. +EXPORT_SYMBOL_GPL(ipu_dp_enable);
  11189. +
  11190. +int ipu_dp_enable_channel(struct ipu_dp *dp)
  11191. +{
  11192. + struct ipu_flow *flow = to_flow(dp);
  11193. + struct ipu_dp_priv *priv = flow->priv;
  11194. + u32 reg;
  11195. +
  11196. + if (!dp->foreground)
  11197. + return 0;
  11198. +
  11199. + mutex_lock(&priv->mutex);
  11200. - reg = readl(flow->base + DP_COM_CONF);
  11201. - reg |= DP_COM_CONF_FG_EN;
  11202. - writel(reg, flow->base + DP_COM_CONF);
  11203. + reg = readl(flow->base + DP_COM_CONF);
  11204. + reg |= DP_COM_CONF_FG_EN;
  11205. + writel(reg, flow->base + DP_COM_CONF);
  11206. - ipu_srm_dp_sync_update(priv->ipu);
  11207. - }
  11208. + ipu_srm_dp_sync_update(priv->ipu);
  11209. mutex_unlock(&priv->mutex);
  11210. @@ -247,25 +259,38 @@
  11211. {
  11212. struct ipu_flow *flow = to_flow(dp);
  11213. struct ipu_dp_priv *priv = flow->priv;
  11214. + u32 reg, csc;
  11215. +
  11216. + if (!dp->foreground)
  11217. + return;
  11218. mutex_lock(&priv->mutex);
  11219. - priv->use_count--;
  11220. + reg = readl(flow->base + DP_COM_CONF);
  11221. + csc = reg & DP_COM_CONF_CSC_DEF_MASK;
  11222. + if (csc == DP_COM_CONF_CSC_DEF_FG)
  11223. + reg &= ~DP_COM_CONF_CSC_DEF_MASK;
  11224. +
  11225. + reg &= ~DP_COM_CONF_FG_EN;
  11226. + writel(reg, flow->base + DP_COM_CONF);
  11227. +
  11228. + writel(0, flow->base + DP_FG_POS);
  11229. + ipu_srm_dp_sync_update(priv->ipu);
  11230. +
  11231. + if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
  11232. + ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
  11233. +
  11234. + mutex_unlock(&priv->mutex);
  11235. +}
  11236. +EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
  11237. - if (dp->foreground) {
  11238. - u32 reg, csc;
  11239. +void ipu_dp_disable(struct ipu_soc *ipu)
  11240. +{
  11241. + struct ipu_dp_priv *priv = ipu->dp_priv;
  11242. - reg = readl(flow->base + DP_COM_CONF);
  11243. - csc = reg & DP_COM_CONF_CSC_DEF_MASK;
  11244. - if (csc == DP_COM_CONF_CSC_DEF_FG)
  11245. - reg &= ~DP_COM_CONF_CSC_DEF_MASK;
  11246. -
  11247. - reg &= ~DP_COM_CONF_FG_EN;
  11248. - writel(reg, flow->base + DP_COM_CONF);
  11249. -
  11250. - writel(0, flow->base + DP_FG_POS);
  11251. - ipu_srm_dp_sync_update(priv->ipu);
  11252. - }
  11253. + mutex_lock(&priv->mutex);
  11254. +
  11255. + priv->use_count--;
  11256. if (!priv->use_count)
  11257. ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
  11258. @@ -275,7 +300,7 @@
  11259. mutex_unlock(&priv->mutex);
  11260. }
  11261. -EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
  11262. +EXPORT_SYMBOL_GPL(ipu_dp_disable);
  11263. struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
  11264. {
  11265. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-prv.h linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
  11266. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipu-v3/ipu-prv.h 2014-05-21 23:42:02.000000000 +0200
  11267. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipu-v3/ipu-prv.h 2014-05-23 11:26:48.396940333 +0200
  11268. @@ -185,6 +185,9 @@
  11269. int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
  11270. int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
  11271. +bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
  11272. +int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
  11273. +
  11274. int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
  11275. unsigned long base, u32 module, struct clk *ipu_clk);
  11276. void ipu_di_exit(struct ipu_soc *ipu, int id);
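These two helpers are what ipu_dp_disable_channel() above relies on: ipu_wait_interrupt() first acknowledges the status bit and then busy-waits, up to the given number of milliseconds, for it to be raised again, so the caller observes a fresh event rather than a stale one. The calling pattern, as used for the DP sync-flow end:

	/* only wait if the background channel is actually still running;
	 * the hunk above ignores the -ETIMEDOUT return value */
	if (ipu_idmac_channel_busy(ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
		ipu_wait_interrupt(ipu, IPU_IRQ_DP_SF_END, 50);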
  11277. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipuv3-crtc.c linux-3.15-rc6/drivers/staging/imx-drm/ipuv3-crtc.c
  11278. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipuv3-crtc.c 2014-05-21 23:42:02.000000000 +0200
  11279. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipuv3-crtc.c 2014-05-23 11:26:48.396940333 +0200
  11280. @@ -60,24 +60,32 @@
  11281. static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
  11282. {
  11283. + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
  11284. +
  11285. if (ipu_crtc->enabled)
  11286. return;
  11287. - ipu_di_enable(ipu_crtc->di);
  11288. - ipu_dc_enable_channel(ipu_crtc->dc);
  11289. + ipu_dc_enable(ipu);
  11290. ipu_plane_enable(ipu_crtc->plane[0]);
  11291. + /* Start DC channel and DI after IDMAC */
  11292. + ipu_dc_enable_channel(ipu_crtc->dc);
  11293. + ipu_di_enable(ipu_crtc->di);
  11294. ipu_crtc->enabled = 1;
  11295. }
  11296. static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
  11297. {
  11298. + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
  11299. +
  11300. if (!ipu_crtc->enabled)
  11301. return;
  11302. - ipu_plane_disable(ipu_crtc->plane[0]);
  11303. + /* Stop DC channel and DI before IDMAC */
  11304. ipu_dc_disable_channel(ipu_crtc->dc);
  11305. ipu_di_disable(ipu_crtc->di);
  11306. + ipu_plane_disable(ipu_crtc->plane[0]);
  11307. + ipu_dc_disable(ipu);
  11308. ipu_crtc->enabled = 0;
  11309. }
  11310. @@ -158,7 +166,7 @@
  11311. sig_cfg.Vsync_pol = 1;
  11312. sig_cfg.enable_pol = 1;
  11313. - sig_cfg.clk_pol = 1;
  11314. + sig_cfg.clk_pol = 0;
  11315. sig_cfg.width = mode->hdisplay;
  11316. sig_cfg.height = mode->vdisplay;
  11317. sig_cfg.pixel_fmt = out_pixel_fmt;
  11318. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/ipuv3-plane.c linux-3.15-rc6/drivers/staging/imx-drm/ipuv3-plane.c
  11319. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/ipuv3-plane.c 2014-05-21 23:42:02.000000000 +0200
  11320. +++ linux-3.15-rc6/drivers/staging/imx-drm/ipuv3-plane.c 2014-05-23 11:26:48.396940333 +0200
  11321. @@ -239,6 +239,8 @@
  11322. void ipu_plane_enable(struct ipu_plane *ipu_plane)
  11323. {
  11324. + if (ipu_plane->dp)
  11325. + ipu_dp_enable(ipu_plane->ipu);
  11326. ipu_dmfc_enable_channel(ipu_plane->dmfc);
  11327. ipu_idmac_enable_channel(ipu_plane->ipu_ch);
  11328. if (ipu_plane->dp)
  11329. @@ -257,6 +259,8 @@
  11330. ipu_dp_disable_channel(ipu_plane->dp);
  11331. ipu_idmac_disable_channel(ipu_plane->ipu_ch);
  11332. ipu_dmfc_disable_channel(ipu_plane->dmfc);
  11333. + if (ipu_plane->dp)
  11334. + ipu_dp_disable(ipu_plane->ipu);
  11335. }
  11336. static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode)
  11337. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/Kconfig linux-3.15-rc6/drivers/staging/imx-drm/Kconfig
  11338. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/Kconfig 2014-05-21 23:42:02.000000000 +0200
  11339. +++ linux-3.15-rc6/drivers/staging/imx-drm/Kconfig 2014-05-23 11:26:48.396940333 +0200
  11340. @@ -35,6 +35,7 @@
  11341. config DRM_IMX_LDB
  11342. tristate "Support for LVDS displays"
  11343. depends on DRM_IMX && MFD_SYSCON
  11344. + select DRM_PANEL
  11345. help
  11346. Choose this to enable the internal LVDS Display Bridge (LDB)
  11347. found on i.MX53 and i.MX6 processors.
  11348. @@ -60,3 +61,20 @@
  11349. depends on DRM_IMX
  11350. help
  11351. Choose this if you want to use HDMI on i.MX6.
  11352. +
  11353. +config DRM_DW_HDMI_AUDIO
  11354. + tristate "Synopsis Designware Audio interface"
  11355. + depends on DRM_IMX_HDMI != n
  11356. + help
  11357. + Support the Audio interface which is part of the Synopsis
  11358. + Designware HDMI block. This is used in conjunction with
  11359. + the i.MX HDMI driver.
  11360. +
  11361. +config DRM_DW_HDMI_CEC
  11362. + tristate "Synopsis Designware CEC interface"
  11363. + depends on DRM_IMX_HDMI != n
  11364. + select HDMI_CEC_CORE
  11365. + help
  11366. + Support the CEC interface which is part of the Synopsis
  11367. + Designware HDMI block. This is used in conjunction with
  11368. + the i.MX HDMI driver.
  11369. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/Makefile linux-3.15-rc6/drivers/staging/imx-drm/Makefile
  11370. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/Makefile 2014-05-21 23:42:02.000000000 +0200
  11371. +++ linux-3.15-rc6/drivers/staging/imx-drm/Makefile 2014-05-23 11:26:48.396940333 +0200
  11372. @@ -3,6 +3,7 @@
  11373. obj-$(CONFIG_DRM_IMX) += imxdrm.o
  11374. +obj-$(CONFIG_DRM_IMX) += drm-ddc-connector.o
  11375. obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
  11376. obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
  11377. obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
  11378. @@ -11,3 +12,5 @@
  11379. imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
  11380. obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
  11381. obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o
  11382. +obj-$(CONFIG_DRM_DW_HDMI_AUDIO) += dw-hdmi-audio.o
  11383. +obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
  11384. diff -Nur linux-3.15-rc6.orig/drivers/staging/imx-drm/parallel-display.c linux-3.15-rc6/drivers/staging/imx-drm/parallel-display.c
  11385. --- linux-3.15-rc6.orig/drivers/staging/imx-drm/parallel-display.c 2014-05-21 23:42:02.000000000 +0200
  11386. +++ linux-3.15-rc6/drivers/staging/imx-drm/parallel-display.c 2014-05-23 11:26:48.396940333 +0200
  11387. @@ -219,6 +219,8 @@
  11388. imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB565;
  11389. else if (!strcmp(fmt, "bgr666"))
  11390. imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
  11391. + else if (!strcmp(fmt, "rgb666"))
  11392. + imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB666;
  11393. }
  11394. panel_node = of_parse_phandle(np, "fsl,panel", 0);
  11395. diff -Nur linux-3.15-rc6.orig/include/linux/cec-dev.h linux-3.15-rc6/include/linux/cec-dev.h
  11396. --- linux-3.15-rc6.orig/include/linux/cec-dev.h 1970-01-01 01:00:00.000000000 +0100
  11397. +++ linux-3.15-rc6/include/linux/cec-dev.h 2014-05-23 11:26:48.396940333 +0200
  11398. @@ -0,0 +1,69 @@
  11399. +#ifndef _LINUX_CEC_DEV_H
  11400. +#define _LINUX_CEC_DEV_H
  11401. +
  11402. +#include <linux/cdev.h>
  11403. +#include <linux/list.h>
  11404. +#include <linux/mutex.h>
  11405. +#include <linux/spinlock.h>
  11406. +#include <linux/wait.h>
  11407. +
  11408. +#include <uapi/linux/cec-dev.h>
  11409. +
  11410. +struct device;
  11411. +
  11412. +struct cec_dev {
  11413. + struct cdev cdev;
  11414. + dev_t devn;
  11415. +
  11416. + struct mutex mutex;
  11417. + unsigned users;
  11418. +
  11419. + spinlock_t lock;
  11420. + wait_queue_head_t waitq;
  11421. + struct list_head events;
  11422. + u8 write_busy;
  11423. +
  11424. + u8 retries;
  11425. + u16 addresses;
  11426. + u16 physical;
  11427. +
  11428. + int (*open)(struct cec_dev *);
  11429. + void (*release)(struct cec_dev *);
  11430. + void (*send_message)(struct cec_dev *, u8 *, size_t);
  11431. + void (*set_address)(struct cec_dev *, unsigned);
  11432. +};
  11433. +
  11434. +void cec_dev_event(struct cec_dev *cec_dev, int type, u8 *msg, size_t len);
  11435. +
  11436. +static inline void cec_dev_receive(struct cec_dev *cec_dev, u8 *msg,
  11437. + unsigned len)
  11438. +{
  11439. + cec_dev_event(cec_dev, MESSAGE_TYPE_RECEIVE_SUCCESS, msg, len);
  11440. +}
  11441. +
  11442. +static inline void cec_dev_send_complete(struct cec_dev *cec_dev, int ack)
  11443. +{
  11444. + cec_dev->retries = 0;
  11445. + cec_dev->write_busy = 0;
  11446. +
  11447. + cec_dev_event(cec_dev, ack ? MESSAGE_TYPE_SEND_SUCCESS :
  11448. + MESSAGE_TYPE_NOACK, NULL, 0);
  11449. +}
  11450. +
  11451. +static inline void cec_dev_disconnect(struct cec_dev *cec_dev)
  11452. +{
  11453. + cec_dev->physical = 0;
  11454. + cec_dev_event(cec_dev, MESSAGE_TYPE_DISCONNECTED, NULL, 0);
  11455. +}
  11456. +
  11457. +static inline void cec_dev_connect(struct cec_dev *cec_dev, u32 phys)
  11458. +{
  11459. + cec_dev->physical = phys;
  11460. + cec_dev_event(cec_dev, MESSAGE_TYPE_CONNECTED, NULL, 0);
  11461. +}
  11462. +
  11463. +void cec_dev_init(struct cec_dev *cec_dev, struct module *);
  11464. +int cec_dev_add(struct cec_dev *cec_dev, struct device *, const char *name);
  11465. +void cec_dev_remove(struct cec_dev *cec_dev);
  11466. +
  11467. +#endif
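A low-level CEC driver (such as the dw-hdmi-cec driver earlier in this patch) is expected to embed struct cec_dev, fill in the four callbacks, and feed hardware events back through the helpers above. A minimal sketch of the interrupt-handler side — the my_cec_* type and register accessors are hypothetical; only the cec_dev_* calls come from this header:

	static irqreturn_t my_cec_irq(int irq, void *data)
	{
		struct my_cec *cec = data;	/* embeds struct cec_dev as cec->cec */
		u8 msg[MAX_MESSAGE_LEN];
		unsigned int len;

		if (my_cec_rx_ready(cec)) {
			len = my_cec_read_frame(cec, msg);
			cec_dev_receive(&cec->cec, msg, len);
		}
		if (my_cec_tx_done(cec))
			cec_dev_send_complete(&cec->cec, my_cec_tx_acked(cec));

		return IRQ_HANDLED;
	}

Hot-plug handling would similarly call cec_dev_connect()/cec_dev_disconnect() with the EDID-derived physical address.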
  11468. diff -Nur linux-3.15-rc6.orig/include/linux/mmc/host.h linux-3.15-rc6/include/linux/mmc/host.h
  11469. --- linux-3.15-rc6.orig/include/linux/mmc/host.h 2014-05-21 23:42:02.000000000 +0200
  11470. +++ linux-3.15-rc6/include/linux/mmc/host.h 2014-05-23 11:26:48.400940347 +0200
  11471. @@ -278,6 +278,7 @@
  11472. #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
  11473. MMC_CAP2_PACKED_WR)
  11474. #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
  11475. +#define MMC_CAP2_SDIO_NOTHREAD (1 << 15)
  11476. mmc_pm_flag_t pm_caps; /* supported pm features */
  11477. @@ -293,6 +294,11 @@
  11478. unsigned long clkgate_delay;
  11479. #endif
  11480. + /* card specific properties to deal with power and reset */
  11481. + struct regulator *card_regulator; /* External VCC needed by the card */
  11482. + struct gpio_desc *card_reset_gpios[2]; /* External resets, active low */
  11483. + struct clk *card_clk; /* External clock needed by the card */
  11484. +
  11485. /* host specific block data */
  11486. unsigned int max_seg_size; /* see blk_queue_max_segment_size */
  11487. unsigned short max_segs; /* see blk_queue_max_segments */
  11488. @@ -391,6 +397,8 @@
  11489. wake_up_process(host->sdio_irq_thread);
  11490. }
  11491. +void sdio_run_irqs(struct mmc_host *host);
  11492. +
  11493. #ifdef CONFIG_REGULATOR
  11494. int mmc_regulator_get_ocrmask(struct regulator *supply);
  11495. int mmc_regulator_set_ocr(struct mmc_host *mmc,
  11496. diff -Nur linux-3.15-rc6.orig/include/linux/mmc/sdhci.h linux-3.15-rc6/include/linux/mmc/sdhci.h
  11497. --- linux-3.15-rc6.orig/include/linux/mmc/sdhci.h 2014-05-21 23:42:02.000000000 +0200
  11498. +++ linux-3.15-rc6/include/linux/mmc/sdhci.h 2014-05-23 11:26:48.400940347 +0200
  11499. @@ -57,12 +57,8 @@
  11500. #define SDHCI_QUIRK_BROKEN_CARD_DETECTION (1<<15)
  11501. /* Controller reports inverted write-protect state */
  11502. #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT (1<<16)
  11503. -/* Controller has nonstandard clock management */
  11504. -#define SDHCI_QUIRK_NONSTANDARD_CLOCK (1<<17)
  11505. /* Controller does not like fast PIO transfers */
  11506. #define SDHCI_QUIRK_PIO_NEEDS_DELAY (1<<18)
  11507. -/* Controller losing signal/interrupt enable states after reset */
  11508. -#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
  11509. /* Controller has to be forced to use block size of 2048 bytes */
  11510. #define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
  11511. /* Controller cannot do multi-block transfers */
  11512. @@ -147,6 +143,7 @@
  11513. bool runtime_suspended; /* Host is runtime suspended */
  11514. bool bus_on; /* Bus power prevents runtime suspend */
  11515. + bool preset_enabled; /* Preset is enabled */
  11516. struct mmc_request *mrq; /* Current request */
  11517. struct mmc_command *cmd; /* Current command */
  11518. @@ -164,8 +161,7 @@
  11519. dma_addr_t adma_addr; /* Mapped ADMA descr. table */
  11520. dma_addr_t align_addr; /* Mapped bounce buffer */
  11521. - struct tasklet_struct card_tasklet; /* Tasklet structures */
  11522. - struct tasklet_struct finish_tasklet;
  11523. + struct tasklet_struct finish_tasklet; /* Tasklet structures */
  11524. struct timer_list timer; /* Timer for timeouts */
  11525. @@ -177,6 +173,13 @@
  11526. unsigned int ocr_avail_mmc;
  11527. u32 ocr_mask; /* available voltages */
  11528. + unsigned timing; /* Current timing */
  11529. +
  11530. + u32 thread_isr;
  11531. +
  11532. + /* cached registers */
  11533. + u32 ier;
  11534. +
  11535. wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
  11536. unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
  11537. diff -Nur linux-3.15-rc6.orig/include/uapi/linux/cec-dev.h linux-3.15-rc6/include/uapi/linux/cec-dev.h
  11538. --- linux-3.15-rc6.orig/include/uapi/linux/cec-dev.h 1970-01-01 01:00:00.000000000 +0100
  11539. +++ linux-3.15-rc6/include/uapi/linux/cec-dev.h 2014-05-23 11:26:48.400940347 +0200
  11540. @@ -0,0 +1,34 @@
  11541. +#ifndef _UAPI_LINUX_CEC_DEV_H
  11542. +#define _UAPI_LINUX_CEC_DEV_H
  11543. +
  11544. +#include <linux/ioctl.h>
  11545. +#include <linux/types.h>
  11546. +
  11547. +#define MAX_MESSAGE_LEN 16
  11548. +
  11549. +enum {
  11550. + HDMICEC_IOC_MAGIC = 'H',
  11551. + /* This is wrong: we pass the argument as a number, not a pointer */
  11552. + HDMICEC_IOC_O_SETLOGICALADDRESS = _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char),
  11553. + HDMICEC_IOC_SETLOGICALADDRESS = _IO(HDMICEC_IOC_MAGIC, 1),
  11554. + HDMICEC_IOC_STARTDEVICE = _IO(HDMICEC_IOC_MAGIC, 2),
  11555. + HDMICEC_IOC_STOPDEVICE = _IO(HDMICEC_IOC_MAGIC, 3),
  11556. + HDMICEC_IOC_GETPHYADDRESS = _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4]),
  11557. +};
  11558. +
  11559. +enum {
  11560. + MESSAGE_TYPE_RECEIVE_SUCCESS = 1,
  11561. + MESSAGE_TYPE_NOACK,
  11562. + MESSAGE_TYPE_DISCONNECTED,
  11563. + MESSAGE_TYPE_CONNECTED,
  11564. + MESSAGE_TYPE_SEND_SUCCESS,
  11565. + MESSAGE_TYPE_SEND_ERROR,
  11566. +};
  11567. +
  11568. +struct cec_user_event {
  11569. + __u32 event_type;
  11570. + __u32 msg_len;
  11571. + __u8 msg[MAX_MESSAGE_LEN];
  11572. +};
  11573. +
  11574. +#endif
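From userspace the interface looks like a character device driven by these ioctls plus read() for events. A minimal sketch, assuming the device node name below (it is not defined by this header) and assuming read() returns one struct cec_user_event per call, which is what the event layout suggests:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/cec-dev.h>

	int main(void)
	{
		struct cec_user_event ev;
		int fd = open("/dev/hdmi-cec", O_RDWR);	/* node name is an assumption */

		if (fd < 0)
			return 1;
		/* the logical address is passed as a plain number, per the comment above */
		ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4);
		ioctl(fd, HDMICEC_IOC_STARTDEVICE, 0);
		while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
			printf("event %u, %u byte(s)\n", ev.event_type, ev.msg_len);
		ioctl(fd, HDMICEC_IOC_STOPDEVICE, 0);
		close(fd);
		return 0;
	}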
  11575. diff -Nur linux-3.15-rc6.orig/include/uapi/linux/videodev2.h linux-3.15-rc6/include/uapi/linux/videodev2.h
  11576. --- linux-3.15-rc6.orig/include/uapi/linux/videodev2.h 2014-05-21 23:42:02.000000000 +0200
  11577. +++ linux-3.15-rc6/include/uapi/linux/videodev2.h 2014-05-23 11:26:48.400940347 +0200
  11578. @@ -299,6 +299,7 @@
  11579. #define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
  11580. #define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
  11581. #define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
  11582. +#define V4L2_PIX_FMT_RGB666 v4l2_fourcc('R', 'G', 'B', 'H') /* 18 RGB-6-6-6 */
  11583. #define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
  11584. #define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
  11585. #define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
  11586. diff -Nur linux-3.15-rc6.orig/sound/soc/fsl/imx-pcm-dma.c linux-3.15-rc6/sound/soc/fsl/imx-pcm-dma.c
  11587. --- linux-3.15-rc6.orig/sound/soc/fsl/imx-pcm-dma.c 2014-05-21 23:42:02.000000000 +0200
  11588. +++ linux-3.15-rc6/sound/soc/fsl/imx-pcm-dma.c 2014-05-23 11:26:48.400940347 +0200
  11589. @@ -44,7 +44,7 @@
  11590. .buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
  11591. .period_bytes_min = 128,
  11592. .period_bytes_max = 65535, /* Limited by SDMA engine */
  11593. - .periods_min = 2,
  11594. + .periods_min = 4,
  11595. .periods_max = 255,
  11596. .fifo_size = 0,
  11597. };