cris.patch 399 KB

diff -Nur linux-2.6.39.orig/arch/cris/arch-v10/drivers/axisflashmap.c linux-2.6.39/arch/cris/arch-v10/drivers/axisflashmap.c
--- linux-2.6.39.orig/arch/cris/arch-v10/drivers/axisflashmap.c	2011-05-19 06:06:34.000000000 +0200
+++ linux-2.6.39/arch/cris/arch-v10/drivers/axisflashmap.c	2011-07-28 16:16:35.633425525 +0200
@@ -113,7 +113,7 @@
 /* If no partition-table was found, we use this default-set. */
 #define MAX_PARTITIONS 7
-#define NUM_DEFAULT_PARTITIONS 3
+#define NUM_DEFAULT_PARTITIONS 4
 /*
  * Default flash size is 2MB. CONFIG_ETRAX_PTABLE_SECTOR is most likely the
@@ -122,19 +122,24 @@
  */
 static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = {
 	{
-		.name = "boot firmware",
-		.size = CONFIG_ETRAX_PTABLE_SECTOR,
+		.name = "kernel",
+		.size = 0x00,
 		.offset = 0
 	},
 	{
-		.name = "kernel",
-		.size = 0x200000 - (6 * CONFIG_ETRAX_PTABLE_SECTOR),
-		.offset = CONFIG_ETRAX_PTABLE_SECTOR
+		.name = "rootfs",
+		.size = 0x200000 ,
+		.offset = 0x200000
+	},
+	{
+		.name = "cfgfs",
+		.size = 0x20000 ,
+		.offset = CONFIG_ETRAX_MTD_SIZE - 0x20000
 	},
 	{
-		.name = "filesystem",
-		.size = 5 * CONFIG_ETRAX_PTABLE_SECTOR,
-		.offset = 0x200000 - (5 * CONFIG_ETRAX_PTABLE_SECTOR)
+		.name = "linux",
+		.size = CONFIG_ETRAX_MTD_SIZE - 0x20000,
+		.offset = 0
 	}
 };
@@ -275,6 +280,11 @@
 	struct partitiontable_entry *ptable;
 	int use_default_ptable = 1; /* Until proven otherwise. */
 	const char pmsg[] = " /dev/flash%d at 0x%08x, size 0x%08x\n";
+	unsigned int kernel_part_size = 0;
+	unsigned char *flash_mem = (unsigned char*)(FLASH_CACHED_ADDR);
+	unsigned int flash_scan_count = 0;
+	const char *part_magic = "ACME_PART_MAGIC";
+	unsigned int magic_len = strlen(part_magic);
 	if (!(mymtd = flash_probe())) {
 		/* There's no reason to use this module if no flash chip can
@@ -286,6 +296,31 @@
 		       mymtd->name, mymtd->size);
 		axisflash_mtd = mymtd;
 	}
  58. + /* scan flash to find out where our partition starts */
  59. +
  60. + printk(KERN_INFO "Scanning flash for end of kernel magic\n");
  61. + for(flash_scan_count = 0; flash_scan_count < 100000; flash_scan_count++){
  62. + if(strncmp(&flash_mem[flash_scan_count], part_magic, magic_len - 1) == 0)
  63. + {
  64. + kernel_part_size = flash_mem[flash_scan_count + magic_len ];
  65. + kernel_part_size <<= 8;
  66. + kernel_part_size += flash_mem[flash_scan_count + magic_len + 2];
  67. + kernel_part_size <<= 8;
  68. + kernel_part_size += flash_mem[flash_scan_count + magic_len + 1];
  69. + kernel_part_size <<= 8;
  70. + kernel_part_size += flash_mem[flash_scan_count + magic_len + 3];
  71. + printk(KERN_INFO "Kernel ends at 0x%.08X\n", kernel_part_size);
  72. + flash_scan_count = 1100000;
  73. + }
  74. + }
  75. +
  76. +
  77. + if(kernel_part_size){
  78. + kernel_part_size = (kernel_part_size & 0xffff0000);
  79. + axis_default_partitions[0].size = kernel_part_size;
  80. + axis_default_partitions[1].size = mymtd->size - axis_default_partitions[0].size - axis_default_partitions[2].size;
  81. + axis_default_partitions[1].offset = axis_default_partitions[0].size;
  82. + }
  83. if (mymtd) {
  84. mymtd->owner = THIS_MODULE;
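
The scan above looks, within the first 100000 bytes of the cached flash window, for the "ACME_PART_MAGIC" string that this patch appends to hw_settings.S (see that change further down), reads the 32-bit kernel length stored right after it, rounds it down to a 64 KB boundary, and resizes the "kernel" and "rootfs" defaults accordingly. A standalone sketch (not part of the patch) of the same lookup, mainly to make the byte order explicit:

    #include <stddef.h>
    #include <string.h>

    /* Mirror of the scan above; returns 0 if the marker is not found. */
    static unsigned int find_kernel_end(const unsigned char *flash, size_t limit)
    {
            static const char magic[] = "ACME_PART_MAGIC";
            const size_t magic_len = sizeof(magic) - 1;   /* 15; no NUL in flash */
            size_t i;

            for (i = 0; i + magic_len + 4 <= limit; i++) {
                    /* the patch compares only magic_len - 1 characters */
                    if (memcmp(&flash[i], magic, magic_len - 1) != 0)
                            continue;
                    /* same byte shuffle as above: bytes 0, 2, 1, 3 after the magic */
                    return ((unsigned int)flash[i + magic_len]     << 24) |
                           ((unsigned int)flash[i + magic_len + 2] << 16) |
                           ((unsigned int)flash[i + magic_len + 1] <<  8) |
                            (unsigned int)flash[i + magic_len + 3];
            }
            return 0;
    }
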
  85. @@ -354,21 +389,6 @@
  86. use_default_ptable = !ptable_ok;
  87. }
  88. - if (romfs_in_flash) {
  89. - /* Add an overlapping device for the root partition (romfs). */
  90. -
  91. - axis_partitions[pidx].name = "romfs";
  92. - axis_partitions[pidx].size = romfs_length;
  93. - axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR;
  94. - axis_partitions[pidx].mask_flags |= MTD_WRITEABLE;
  95. -
  96. - printk(KERN_INFO
  97. - " Adding readonly flash partition for romfs image:\n");
  98. - printk(pmsg, pidx, axis_partitions[pidx].offset,
  99. - axis_partitions[pidx].size);
  100. - pidx++;
  101. - }
  102. -
  103. #ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
  104. if (mymtd) {
  105. main_partition.size = mymtd->size;
  106. @@ -391,36 +411,6 @@
  107. if (err)
  108. panic("axisflashmap could not add MTD partitions!\n");
  109. }
  110. -
  111. - if (!romfs_in_flash) {
  112. - /* Create an RAM device for the root partition (romfs). */
  113. -
  114. -#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
  115. - /* No use trying to boot this kernel from RAM. Panic! */
  116. - printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
  117. - "device due to kernel (mis)configuration!\n");
  118. - panic("This kernel cannot boot from RAM!\n");
  119. -#else
  120. - struct mtd_info *mtd_ram;
  121. -
  122. - mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
  123. - if (!mtd_ram)
  124. - panic("axisflashmap couldn't allocate memory for "
  125. - "mtd_info!\n");
  126. -
  127. - printk(KERN_INFO " Adding RAM partition for romfs image:\n");
  128. - printk(pmsg, pidx, (unsigned)romfs_start,
  129. - (unsigned)romfs_length);
  130. -
  131. - err = mtdram_init_device(mtd_ram,
  132. - (void *)romfs_start,
  133. - romfs_length,
  134. - "romfs");
  135. - if (err)
  136. - panic("axisflashmap could not initialize MTD RAM "
  137. - "device!\n");
  138. -#endif
  139. - }
  140. return err;
  141. }
  142. diff -Nur linux-2.6.39.orig/arch/cris/arch-v10/drivers/axisflashmap.c.orig linux-2.6.39/arch/cris/arch-v10/drivers/axisflashmap.c.orig
  143. --- linux-2.6.39.orig/arch/cris/arch-v10/drivers/axisflashmap.c.orig 1970-01-01 01:00:00.000000000 +0100
  144. +++ linux-2.6.39/arch/cris/arch-v10/drivers/axisflashmap.c.orig 2011-05-19 06:06:34.000000000 +0200
  145. @@ -0,0 +1,430 @@
  146. +/*
  147. + * Physical mapping layer for MTD using the Axis partitiontable format
  148. + *
  149. + * Copyright (c) 2001, 2002 Axis Communications AB
  150. + *
  151. + * This file is under the GPL.
  152. + *
  153. + * First partition is always sector 0 regardless of if we find a partitiontable
  154. + * or not. In the start of the next sector, there can be a partitiontable that
  155. + * tells us what other partitions to define. If there isn't, we use a default
  156. + * partition split defined below.
  157. + *
  158. + */
  159. +
  160. +#include <linux/module.h>
  161. +#include <linux/types.h>
  162. +#include <linux/kernel.h>
  163. +#include <linux/init.h>
  164. +#include <linux/slab.h>
  165. +
  166. +#include <linux/mtd/concat.h>
  167. +#include <linux/mtd/map.h>
  168. +#include <linux/mtd/mtd.h>
  169. +#include <linux/mtd/mtdram.h>
  170. +#include <linux/mtd/partitions.h>
  171. +
  172. +#include <asm/axisflashmap.h>
  173. +#include <asm/mmu.h>
  174. +#include <arch/sv_addr_ag.h>
  175. +
  176. +#ifdef CONFIG_CRIS_LOW_MAP
  177. +#define FLASH_UNCACHED_ADDR KSEG_8
  178. +#define FLASH_CACHED_ADDR KSEG_5
  179. +#else
  180. +#define FLASH_UNCACHED_ADDR KSEG_E
  181. +#define FLASH_CACHED_ADDR KSEG_F
  182. +#endif
  183. +
  184. +#if CONFIG_ETRAX_FLASH_BUSWIDTH==1
  185. +#define flash_data __u8
  186. +#elif CONFIG_ETRAX_FLASH_BUSWIDTH==2
  187. +#define flash_data __u16
  188. +#elif CONFIG_ETRAX_FLASH_BUSWIDTH==4
  189. +#define flash_data __u32
  190. +#endif
  191. +
  192. +/* From head.S */
  193. +extern unsigned long romfs_start, romfs_length, romfs_in_flash;
  194. +
  195. +/* The master mtd for the entire flash. */
  196. +struct mtd_info* axisflash_mtd = NULL;
  197. +
  198. +/* Map driver functions. */
  199. +
  200. +static map_word flash_read(struct map_info *map, unsigned long ofs)
  201. +{
  202. + map_word tmp;
  203. + tmp.x[0] = *(flash_data *)(map->map_priv_1 + ofs);
  204. + return tmp;
  205. +}
  206. +
  207. +static void flash_copy_from(struct map_info *map, void *to,
  208. + unsigned long from, ssize_t len)
  209. +{
  210. + memcpy(to, (void *)(map->map_priv_1 + from), len);
  211. +}
  212. +
  213. +static void flash_write(struct map_info *map, map_word d, unsigned long adr)
  214. +{
  215. + *(flash_data *)(map->map_priv_1 + adr) = (flash_data)d.x[0];
  216. +}
  217. +
  218. +/*
  219. + * The map for chip select e0.
  220. + *
  221. + * We run into tricky coherence situations if we mix cached with uncached
  222. + * accesses to we only use the uncached version here.
  223. + *
  224. + * The size field is the total size where the flash chips may be mapped on the
  225. + * chip select. MTD probes should find all devices there and it does not matter
  226. + * if there are unmapped gaps or aliases (mirrors of flash devices). The MTD
  227. + * probes will ignore them.
  228. + *
  229. + * The start address in map_priv_1 is in virtual memory so we cannot use
  230. + * MEM_CSE0_START but must rely on that FLASH_UNCACHED_ADDR is the start
  231. + * address of cse0.
  232. + */
  233. +static struct map_info map_cse0 = {
  234. + .name = "cse0",
  235. + .size = MEM_CSE0_SIZE,
  236. + .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH,
  237. + .read = flash_read,
  238. + .copy_from = flash_copy_from,
  239. + .write = flash_write,
  240. + .map_priv_1 = FLASH_UNCACHED_ADDR
  241. +};
  242. +
  243. +/*
  244. + * The map for chip select e1.
  245. + *
  246. + * If there was a gap between cse0 and cse1, map_priv_1 would get the wrong
  247. + * address, but there isn't.
  248. + */
  249. +static struct map_info map_cse1 = {
  250. + .name = "cse1",
  251. + .size = MEM_CSE1_SIZE,
  252. + .bankwidth = CONFIG_ETRAX_FLASH_BUSWIDTH,
  253. + .read = flash_read,
  254. + .copy_from = flash_copy_from,
  255. + .write = flash_write,
  256. + .map_priv_1 = FLASH_UNCACHED_ADDR + MEM_CSE0_SIZE
  257. +};
  258. +
  259. +/* If no partition-table was found, we use this default-set. */
  260. +#define MAX_PARTITIONS 7
  261. +#define NUM_DEFAULT_PARTITIONS 3
  262. +
  263. +/*
  264. + * Default flash size is 2MB. CONFIG_ETRAX_PTABLE_SECTOR is most likely the
  265. + * size of one flash block and "filesystem"-partition needs 5 blocks to be able
  266. + * to use JFFS.
  267. + */
  268. +static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = {
  269. + {
  270. + .name = "boot firmware",
  271. + .size = CONFIG_ETRAX_PTABLE_SECTOR,
  272. + .offset = 0
  273. + },
  274. + {
  275. + .name = "kernel",
  276. + .size = 0x200000 - (6 * CONFIG_ETRAX_PTABLE_SECTOR),
  277. + .offset = CONFIG_ETRAX_PTABLE_SECTOR
  278. + },
  279. + {
  280. + .name = "filesystem",
  281. + .size = 5 * CONFIG_ETRAX_PTABLE_SECTOR,
  282. + .offset = 0x200000 - (5 * CONFIG_ETRAX_PTABLE_SECTOR)
  283. + }
  284. +};
  285. +
  286. +/* Initialize the ones normally used. */
  287. +static struct mtd_partition axis_partitions[MAX_PARTITIONS] = {
  288. + {
  289. + .name = "part0",
  290. + .size = CONFIG_ETRAX_PTABLE_SECTOR,
  291. + .offset = 0
  292. + },
  293. + {
  294. + .name = "part1",
  295. + .size = 0,
  296. + .offset = 0
  297. + },
  298. + {
  299. + .name = "part2",
  300. + .size = 0,
  301. + .offset = 0
  302. + },
  303. + {
  304. + .name = "part3",
  305. + .size = 0,
  306. + .offset = 0
  307. + },
  308. + {
  309. + .name = "part4",
  310. + .size = 0,
  311. + .offset = 0
  312. + },
  313. + {
  314. + .name = "part5",
  315. + .size = 0,
  316. + .offset = 0
  317. + },
  318. + {
  319. + .name = "part6",
  320. + .size = 0,
  321. + .offset = 0
  322. + },
  323. +};
  324. +
  325. +#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
  326. +/* Main flash device */
  327. +static struct mtd_partition main_partition = {
  328. + .name = "main",
  329. + .size = 0,
  330. + .offset = 0
  331. +};
  332. +#endif
  333. +
  334. +/*
  335. + * Probe a chip select for AMD-compatible (JEDEC) or CFI-compatible flash
  336. + * chips in that order (because the amd_flash-driver is faster).
  337. + */
  338. +static struct mtd_info *probe_cs(struct map_info *map_cs)
  339. +{
  340. + struct mtd_info *mtd_cs = NULL;
  341. +
  342. + printk(KERN_INFO
  343. + "%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n",
  344. + map_cs->name, map_cs->size, map_cs->map_priv_1);
  345. +
  346. +#ifdef CONFIG_MTD_CFI
  347. + mtd_cs = do_map_probe("cfi_probe", map_cs);
  348. +#endif
  349. +#ifdef CONFIG_MTD_JEDECPROBE
  350. + if (!mtd_cs)
  351. + mtd_cs = do_map_probe("jedec_probe", map_cs);
  352. +#endif
  353. +
  354. + return mtd_cs;
  355. +}
  356. +
  357. +/*
  358. + * Probe each chip select individually for flash chips. If there are chips on
  359. + * both cse0 and cse1, the mtd_info structs will be concatenated to one struct
  360. + * so that MTD partitions can cross chip boundries.
  361. + *
  362. + * The only known restriction to how you can mount your chips is that each
  363. + * chip select must hold similar flash chips. But you need external hardware
  364. + * to do that anyway and you can put totally different chips on cse0 and cse1
  365. + * so it isn't really much of a restriction.
  366. + */
  367. +static struct mtd_info *flash_probe(void)
  368. +{
  369. + struct mtd_info *mtd_cse0;
  370. + struct mtd_info *mtd_cse1;
  371. + struct mtd_info *mtd_cse;
  372. +
  373. + mtd_cse0 = probe_cs(&map_cse0);
  374. + mtd_cse1 = probe_cs(&map_cse1);
  375. +
  376. + if (!mtd_cse0 && !mtd_cse1) {
  377. + /* No chip found. */
  378. + return NULL;
  379. + }
  380. +
  381. + if (mtd_cse0 && mtd_cse1) {
  382. + struct mtd_info *mtds[] = { mtd_cse0, mtd_cse1 };
  383. +
  384. + /* Since the concatenation layer adds a small overhead we
  385. + * could try to figure out if the chips in cse0 and cse1 are
  386. + * identical and reprobe the whole cse0+cse1 window. But since
  387. + * flash chips are slow, the overhead is relatively small.
  388. + * So we use the MTD concatenation layer instead of further
  389. + * complicating the probing procedure.
  390. + */
  391. + mtd_cse = mtd_concat_create(mtds, ARRAY_SIZE(mtds),
  392. + "cse0+cse1");
  393. + if (!mtd_cse) {
  394. + printk(KERN_ERR "%s and %s: Concatenation failed!\n",
  395. + map_cse0.name, map_cse1.name);
  396. +
  397. + /* The best we can do now is to only use what we found
  398. + * at cse0.
  399. + */
  400. + mtd_cse = mtd_cse0;
  401. + map_destroy(mtd_cse1);
  402. + }
  403. + } else {
  404. + mtd_cse = mtd_cse0? mtd_cse0 : mtd_cse1;
  405. + }
  406. +
  407. + return mtd_cse;
  408. +}
  409. +
  410. +/*
  411. + * Probe the flash chip(s) and, if it succeeds, read the partition-table
  412. + * and register the partitions with MTD.
  413. + */
  414. +static int __init init_axis_flash(void)
  415. +{
  416. + struct mtd_info *mymtd;
  417. + int err = 0;
  418. + int pidx = 0;
  419. + struct partitiontable_head *ptable_head = NULL;
  420. + struct partitiontable_entry *ptable;
  421. + int use_default_ptable = 1; /* Until proven otherwise. */
  422. + const char pmsg[] = " /dev/flash%d at 0x%08x, size 0x%08x\n";
  423. +
  424. + if (!(mymtd = flash_probe())) {
  425. + /* There's no reason to use this module if no flash chip can
  426. + * be identified. Make sure that's understood.
  427. + */
  428. + printk(KERN_INFO "axisflashmap: Found no flash chip.\n");
  429. + } else {
  430. + printk(KERN_INFO "%s: 0x%08x bytes of flash memory.\n",
  431. + mymtd->name, mymtd->size);
  432. + axisflash_mtd = mymtd;
  433. + }
  434. +
  435. + if (mymtd) {
  436. + mymtd->owner = THIS_MODULE;
  437. + ptable_head = (struct partitiontable_head *)(FLASH_CACHED_ADDR +
  438. + CONFIG_ETRAX_PTABLE_SECTOR +
  439. + PARTITION_TABLE_OFFSET);
  440. + }
  441. + pidx++; /* First partition is always set to the default. */
  442. +
  443. + if (ptable_head && (ptable_head->magic == PARTITION_TABLE_MAGIC)
  444. + && (ptable_head->size <
  445. + (MAX_PARTITIONS * sizeof(struct partitiontable_entry) +
  446. + PARTITIONTABLE_END_MARKER_SIZE))
  447. + && (*(unsigned long*)((void*)ptable_head + sizeof(*ptable_head) +
  448. + ptable_head->size -
  449. + PARTITIONTABLE_END_MARKER_SIZE)
  450. + == PARTITIONTABLE_END_MARKER)) {
  451. + /* Looks like a start, sane length and end of a
  452. + * partition table, lets check csum etc.
  453. + */
  454. + int ptable_ok = 0;
  455. + struct partitiontable_entry *max_addr =
  456. + (struct partitiontable_entry *)
  457. + ((unsigned long)ptable_head + sizeof(*ptable_head) +
  458. + ptable_head->size);
  459. + unsigned long offset = CONFIG_ETRAX_PTABLE_SECTOR;
  460. + unsigned char *p;
  461. + unsigned long csum = 0;
  462. +
  463. + ptable = (struct partitiontable_entry *)
  464. + ((unsigned long)ptable_head + sizeof(*ptable_head));
  465. +
  466. + /* Lets be PARANOID, and check the checksum. */
  467. + p = (unsigned char*) ptable;
  468. +
  469. + while (p <= (unsigned char*)max_addr) {
  470. + csum += *p++;
  471. + csum += *p++;
  472. + csum += *p++;
  473. + csum += *p++;
  474. + }
  475. + ptable_ok = (csum == ptable_head->checksum);
  476. +
  477. + /* Read the entries and use/show the info. */
  478. + printk(KERN_INFO " Found a%s partition table at 0x%p-0x%p.\n",
  479. + (ptable_ok ? " valid" : "n invalid"), ptable_head,
  480. + max_addr);
  481. +
  482. + /* We have found a working bootblock. Now read the
  483. + * partition table. Scan the table. It ends when
  484. + * there is 0xffffffff, that is, empty flash.
  485. + */
  486. + while (ptable_ok
  487. + && ptable->offset != 0xffffffff
  488. + && ptable < max_addr
  489. + && pidx < MAX_PARTITIONS) {
  490. +
  491. + axis_partitions[pidx].offset = offset + ptable->offset;
  492. + axis_partitions[pidx].size = ptable->size;
  493. +
  494. + printk(pmsg, pidx, axis_partitions[pidx].offset,
  495. + axis_partitions[pidx].size);
  496. + pidx++;
  497. + ptable++;
  498. + }
  499. + use_default_ptable = !ptable_ok;
  500. + }
  501. +
  502. + if (romfs_in_flash) {
  503. + /* Add an overlapping device for the root partition (romfs). */
  504. +
  505. + axis_partitions[pidx].name = "romfs";
  506. + axis_partitions[pidx].size = romfs_length;
  507. + axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR;
  508. + axis_partitions[pidx].mask_flags |= MTD_WRITEABLE;
  509. +
  510. + printk(KERN_INFO
  511. + " Adding readonly flash partition for romfs image:\n");
  512. + printk(pmsg, pidx, axis_partitions[pidx].offset,
  513. + axis_partitions[pidx].size);
  514. + pidx++;
  515. + }
  516. +
  517. +#ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
  518. + if (mymtd) {
  519. + main_partition.size = mymtd->size;
  520. + err = add_mtd_partitions(mymtd, &main_partition, 1);
  521. + if (err)
  522. + panic("axisflashmap: Could not initialize "
  523. + "partition for whole main mtd device!\n");
  524. + }
  525. +#endif
  526. +
  527. + if (mymtd) {
  528. + if (use_default_ptable) {
  529. + printk(KERN_INFO " Using default partition table.\n");
  530. + err = add_mtd_partitions(mymtd, axis_default_partitions,
  531. + NUM_DEFAULT_PARTITIONS);
  532. + } else {
  533. + err = add_mtd_partitions(mymtd, axis_partitions, pidx);
  534. + }
  535. +
  536. + if (err)
  537. + panic("axisflashmap could not add MTD partitions!\n");
  538. + }
  539. +
  540. + if (!romfs_in_flash) {
  541. + /* Create an RAM device for the root partition (romfs). */
  542. +
  543. +#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
  544. + /* No use trying to boot this kernel from RAM. Panic! */
  545. + printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
  546. + "device due to kernel (mis)configuration!\n");
  547. + panic("This kernel cannot boot from RAM!\n");
  548. +#else
  549. + struct mtd_info *mtd_ram;
  550. +
  551. + mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
  552. + if (!mtd_ram)
  553. + panic("axisflashmap couldn't allocate memory for "
  554. + "mtd_info!\n");
  555. +
  556. + printk(KERN_INFO " Adding RAM partition for romfs image:\n");
  557. + printk(pmsg, pidx, (unsigned)romfs_start,
  558. + (unsigned)romfs_length);
  559. +
  560. + err = mtdram_init_device(mtd_ram,
  561. + (void *)romfs_start,
  562. + romfs_length,
  563. + "romfs");
  564. + if (err)
  565. + panic("axisflashmap could not initialize MTD RAM "
  566. + "device!\n");
  567. +#endif
  568. + }
  569. + return err;
  570. +}
  571. +
  572. +/* This adds the above to the kernels init-call chain. */
  573. +module_init(init_axis_flash);
  574. +
  575. +EXPORT_SYMBOL(axisflash_mtd);
  576. diff -Nur linux-2.6.39.orig/arch/cris/arch-v10/drivers/ds1302.c linux-2.6.39/arch/cris/arch-v10/drivers/ds1302.c
  577. --- linux-2.6.39.orig/arch/cris/arch-v10/drivers/ds1302.c 2011-05-19 06:06:34.000000000 +0200
  578. +++ linux-2.6.39/arch/cris/arch-v10/drivers/ds1302.c 2011-07-28 16:16:35.863415658 +0200
  579. @@ -22,6 +22,7 @@
  580. #include <linux/mutex.h>
  581. #include <linux/bcd.h>
  582. #include <linux/capability.h>
  583. +#include <linux/device.h>
  584. #include <asm/uaccess.h>
  585. #include <asm/system.h>
  586. @@ -501,6 +502,10 @@
  587. return 0;
  588. }
  589. +#ifdef CONFIG_SYSFS
  590. +static struct class *rtc_class;
  591. +#endif
  592. +
  593. static int __init ds1302_register(void)
  594. {
  595. ds1302_init();
  596. @@ -509,6 +514,12 @@
  597. ds1302_name, RTC_MAJOR_NR);
  598. return -1;
  599. }
  600. + #ifdef CONFIG_SYSFS
  601. + rtc_class = class_create(THIS_MODULE, "rtc");
  602. + device_create(rtc_class, NULL, MKDEV(RTC_MAJOR_NR, 0),
  603. + NULL, "rtc");
  604. + #endif
  605. +
  606. return 0;
  607. }
  608. diff -Nur linux-2.6.39.orig/arch/cris/arch-v10/drivers/gpio.c linux-2.6.39/arch/cris/arch-v10/drivers/gpio.c
  609. --- linux-2.6.39.orig/arch/cris/arch-v10/drivers/gpio.c 2011-05-19 06:06:34.000000000 +0200
  610. +++ linux-2.6.39/arch/cris/arch-v10/drivers/gpio.c 2011-07-28 16:16:36.023425394 +0200
  611. @@ -20,6 +20,7 @@
  612. #include <linux/poll.h>
  613. #include <linux/init.h>
  614. #include <linux/interrupt.h>
  615. +#include <linux/device.h>
  616. #include <asm/etraxgpio.h>
  617. #include <arch/svinto.h>
  618. @@ -798,6 +799,10 @@
  619. /* main driver initialization routine, called from mem.c */
  620. +#ifdef CONFIG_SYSFS
  621. +static struct class *gpio_class;
  622. +#endif
  623. +
  624. static int __init gpio_init(void)
  625. {
  626. int res;
  627. @@ -811,6 +816,13 @@
  628. return res;
  629. }
  630. +#ifdef CONFIG_SYSFS
  631. + gpio_class = class_create(THIS_MODULE, "gpio");
  632. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 0), NULL, "gpioa");
  633. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 1), NULL, "gpiob");
  634. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 2), NULL, "leds");
  635. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 3), NULL, "gpiog");
  636. +#endif
  637. /* Clear all leds */
  638. #if defined (CONFIG_ETRAX_CSP0_LEDS) || defined (CONFIG_ETRAX_PA_LEDS) || defined (CONFIG_ETRAX_PB_LEDS)
  639. CRIS_LED_NETWORK_SET(0);
  640. diff -Nur linux-2.6.39.orig/arch/cris/arch-v10/lib/hw_settings.S linux-2.6.39/arch/cris/arch-v10/lib/hw_settings.S
  641. --- linux-2.6.39.orig/arch/cris/arch-v10/lib/hw_settings.S 2011-05-19 06:06:34.000000000 +0200
  642. +++ linux-2.6.39/arch/cris/arch-v10/lib/hw_settings.S 2011-07-28 16:16:36.163758404 +0200
  643. @@ -58,3 +58,5 @@
  644. .dword R_PORT_PB_SET
  645. .dword PB_SET_VALUE
  646. .dword 0 ; No more register values
  647. + .ascii "ACME_PART_MAGIC"
  648. + .dword 0xdeadc0de
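
These two lines place a marker right after the hardware-settings block; this is what the axisflashmap scan shown earlier searches for. A sketch of the layout the scanner expects (the 0xdeadc0de placeholder is presumably overwritten with the real kernel image length when the flash image is assembled -- that is an assumption, not something this patch shows):

    struct acme_marker {
            char magic[15];             /* "ACME_PART_MAGIC", no terminating NUL */
            unsigned char len[4];       /* kernel length, decoded as bytes 0, 2, 1, 3 */
    } __attribute__((packed));
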
  649. diff -Nur linux-2.6.39.orig/arch/cris/arch-v10/mm/init.c linux-2.6.39/arch/cris/arch-v10/mm/init.c
  650. --- linux-2.6.39.orig/arch/cris/arch-v10/mm/init.c 2011-05-19 06:06:34.000000000 +0200
  651. +++ linux-2.6.39/arch/cris/arch-v10/mm/init.c 2011-07-28 16:16:36.313421347 +0200
  652. @@ -184,6 +184,9 @@
  653. free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
  654. }
  655. +void free_initrd_mem(unsigned long start, unsigned long end)
  656. +{
  657. +}
  658. /* Initialize remaps of some I/O-ports. It is important that this
  659. * is called before any driver is initialized.
  660. diff -Nur linux-2.6.39.orig/arch/cris/boot/compressed/Makefile linux-2.6.39/arch/cris/boot/compressed/Makefile
  661. --- linux-2.6.39.orig/arch/cris/boot/compressed/Makefile 2011-05-19 06:06:34.000000000 +0200
  662. +++ linux-2.6.39/arch/cris/boot/compressed/Makefile 2011-07-28 16:16:36.453421314 +0200
  663. @@ -18,7 +18,7 @@
  664. OBJECTS-$(CONFIG_ETRAX_ARCH_V32) = $(obj)/head_v32.o
  665. OBJECTS-$(CONFIG_ETRAX_ARCH_V10) = $(obj)/head_v10.o
  666. OBJECTS= $(OBJECTS-y) $(obj)/misc.o
  667. -OBJCOPYFLAGS = -O binary --remove-section=.bss
  668. +#OBJCOPYFLAGS = -O binary --remove-section=.bss
  669. quiet_cmd_image = BUILD $@
  670. cmd_image = cat $(obj)/decompress.bin $(obj)/piggy.gz > $@
  671. diff -Nur linux-2.6.39.orig/arch/cris/boot/Makefile linux-2.6.39/arch/cris/boot/Makefile
  672. --- linux-2.6.39.orig/arch/cris/boot/Makefile 2011-05-19 06:06:34.000000000 +0200
  673. +++ linux-2.6.39/arch/cris/boot/Makefile 2011-07-28 16:16:36.573671907 +0200
  674. @@ -5,7 +5,7 @@
  675. objcopyflags-$(CONFIG_ETRAX_ARCH_V10) += -R .note -R .comment
  676. objcopyflags-$(CONFIG_ETRAX_ARCH_V32) += --remove-section=.bss --remove-section=.note.gnu.build-id
  677. -OBJCOPYFLAGS = -O binary $(objcopyflags-y)
  678. +#OBJCOPYFLAGS = -O binary $(objcopyflags-y)
  679. subdir- := compressed rescue
  680. @@ -17,7 +17,6 @@
  681. $(obj)/compressed/vmlinux: $(obj)/Image FORCE
  682. $(Q)$(MAKE) $(build)=$(obj)/compressed $@
  683. - $(Q)$(MAKE) $(build)=$(obj)/rescue $(obj)/rescue/rescue.bin
  684. $(obj)/zImage: $(obj)/compressed/vmlinux
  685. @cp $< $@
  686. diff -Nur linux-2.6.39.orig/arch/cris/Kconfig linux-2.6.39/arch/cris/Kconfig
  687. --- linux-2.6.39.orig/arch/cris/Kconfig 2011-05-19 06:06:34.000000000 +0200
  688. +++ linux-2.6.39/arch/cris/Kconfig 2011-07-28 16:16:36.713417234 +0200
  689. @@ -168,6 +168,12 @@
  690. help
  691. Size of DRAM (decimal in MB) typically 2, 8 or 16.
  692. +config ETRAX_MTD_SIZE
  693. + hex "MTD size (hex)"
  694. + default "0x00800000"
  695. + help
  696. + Size of the MTD device in bytes; typically 4 or 8 MB.
  697. +
  698. config ETRAX_VMEM_SIZE
  699. int "Video memory size (dec, in MB)"
  700. depends on ETRAX_ARCH_V32 && !ETRAXFS
  701. @@ -273,7 +279,7 @@
  702. select MTD_CFI_AMDSTD
  703. select MTD_JEDECPROBE if ETRAX_ARCH_V32
  704. select MTD_CHAR
  705. - select MTD_BLOCK
  706. + select MTD_BLOCK_RO
  707. select MTD_PARTITIONS
  708. select MTD_COMPLEX_MAPPINGS
  709. help
  710. @@ -660,6 +666,11 @@
  711. source "drivers/ide/Kconfig"
  712. +# Mysteriously, part of this standard Linux driver was removed from the CRIS build! - info@crisos.org
  713. +source "drivers/scsi/Kconfig"
  714. +
  715. +source "drivers/media/Kconfig"
  716. +
  717. source "drivers/net/Kconfig"
  718. source "drivers/i2c/Kconfig"
  719. @@ -675,6 +686,8 @@
  720. source "fs/Kconfig"
  721. +source "sound/Kconfig"
  722. +
  723. source "drivers/usb/Kconfig"
  724. source "drivers/uwb/Kconfig"
  725. diff -Nur linux-2.6.39.orig/arch/cris/Kconfig.orig linux-2.6.39/arch/cris/Kconfig.orig
  726. --- linux-2.6.39.orig/arch/cris/Kconfig.orig 1970-01-01 01:00:00.000000000 +0100
  727. +++ linux-2.6.39/arch/cris/Kconfig.orig 2011-05-19 06:06:34.000000000 +0200
  728. @@ -0,0 +1,690 @@
  729. +config MMU
  730. + bool
  731. + default y
  732. +
  733. +config ZONE_DMA
  734. + bool
  735. + default y
  736. +
  737. +config RWSEM_GENERIC_SPINLOCK
  738. + bool
  739. + default y
  740. +
  741. +config RWSEM_XCHGADD_ALGORITHM
  742. + bool
  743. +
  744. +config GENERIC_CMOS_UPDATE
  745. + def_bool y
  746. +
  747. +config ARCH_USES_GETTIMEOFFSET
  748. + def_bool n
  749. +
  750. +config GENERIC_IOMAP
  751. + bool
  752. + default y
  753. +
  754. +config ARCH_HAS_ILOG2_U32
  755. + bool
  756. + default n
  757. +
  758. +config ARCH_HAS_ILOG2_U64
  759. + bool
  760. + default n
  761. +
  762. +config GENERIC_FIND_NEXT_BIT
  763. + bool
  764. + default y
  765. +
  766. +config GENERIC_HWEIGHT
  767. + bool
  768. + default y
  769. +
  770. +config GENERIC_CALIBRATE_DELAY
  771. + bool
  772. + default y
  773. +
  774. +config NO_IOPORT
  775. + def_bool y
  776. +
  777. +config FORCE_MAX_ZONEORDER
  778. + int
  779. + default 6
  780. +
  781. +config CRIS
  782. + bool
  783. + default y
  784. + select HAVE_IDE
  785. + select HAVE_GENERIC_HARDIRQS
  786. + select GENERIC_IRQ_SHOW
  787. +
  788. +config HZ
  789. + int
  790. + default 100
  791. +
  792. +source "init/Kconfig"
  793. +
  794. +source "kernel/Kconfig.freezer"
  795. +
  796. +menu "General setup"
  797. +
  798. +source "fs/Kconfig.binfmt"
  799. +
  800. +config ETRAX_CMDLINE
  801. + string "Kernel command line"
  802. + default "root=/dev/mtdblock3"
  803. + help
  804. + Pass additional commands to the kernel.
  805. +
  806. +config ETRAX_WATCHDOG
  807. + bool "Enable ETRAX watchdog"
  808. + help
  809. + Enable the built-in watchdog timer support on ETRAX based embedded
  810. + network computers.
  811. +
  812. +config ETRAX_WATCHDOG_NICE_DOGGY
  813. + bool "Disable watchdog during Oops printouts"
  814. + depends on ETRAX_WATCHDOG
  815. + help
  816. + By enabling this you make sure that the watchdog does not bite while
  817. + printing oopses. Recommended for development systems but not for
  818. + production releases.
  819. +
  820. +config ETRAX_FAST_TIMER
  821. + bool "Enable ETRAX fast timer API"
  822. + help
  823. + This options enables the API to a fast timer implementation using
  824. + timer1 to get sub jiffie resolution timers (primarily one-shot
  825. + timers).
  826. + This is needed if CONFIG_ETRAX_SERIAL_FAST_TIMER is enabled.
  827. +
  828. +config ETRAX_KMALLOCED_MODULES
  829. + bool "Enable module allocation with kmalloc"
  830. + help
  831. + Enable module allocation with kmalloc instead of vmalloc.
  832. +
  833. +config OOM_REBOOT
  834. + bool "Enable reboot at out of memory"
  835. +
  836. +source "kernel/Kconfig.preempt"
  837. +
  838. +source mm/Kconfig
  839. +
  840. +endmenu
  841. +
  842. +menu "Hardware setup"
  843. +
  844. +choice
  845. + prompt "Processor type"
  846. + default ETRAX100LX
  847. +
  848. +config ETRAX100LX
  849. + bool "ETRAX-100LX-v1"
  850. + select ARCH_USES_GETTIMEOFFSET
  851. + help
  852. + Support version 1 of the ETRAX 100LX.
  853. +
  854. +config ETRAX100LX_V2
  855. + bool "ETRAX-100LX-v2"
  856. + select ARCH_USES_GETTIMEOFFSET
  857. + help
  858. + Support version 2 of the ETRAX 100LX.
  859. +
  860. +config SVINTO_SIM
  861. + bool "ETRAX-100LX-for-xsim-simulator"
  862. + select ARCH_USES_GETTIMEOFFSET
  863. + help
  864. + Support the xsim ETRAX Simulator.
  865. +
  866. +config ETRAXFS
  867. + bool "ETRAX-FS-V32"
  868. + help
  869. + Support CRIS V32.
  870. +
  871. +config CRIS_MACH_ARTPEC3
  872. + bool "ARTPEC-3"
  873. + help
  874. + Support Axis ARTPEC-3.
  875. +
  876. +endchoice
  877. +
  878. +config ETRAX_VCS_SIM
  879. + bool "VCS Simulator"
  880. + help
  881. + Setup hardware to be run in the VCS simulator.
  882. +
  883. +config ETRAX_ARCH_V10
  884. + bool
  885. + default y if ETRAX100LX || ETRAX100LX_V2
  886. + default n if !(ETRAX100LX || ETRAX100LX_V2)
  887. +
  888. +config ETRAX_ARCH_V32
  889. + bool
  890. + default y if (ETRAXFS || CRIS_MACH_ARTPEC3)
  891. + default n if !(ETRAXFS || CRIS_MACH_ARTPEC3)
  892. +
  893. +config ETRAX_DRAM_SIZE
  894. + int "DRAM size (dec, in MB)"
  895. + default "8"
  896. + help
  897. + Size of DRAM (decimal in MB) typically 2, 8 or 16.
  898. +
  899. +config ETRAX_VMEM_SIZE
  900. + int "Video memory size (dec, in MB)"
  901. + depends on ETRAX_ARCH_V32 && !ETRAXFS
  902. + default 8 if !ETRAXFS
  903. + help
  904. + Size of Video accessible memory (decimal, in MB).
  905. +
  906. +config ETRAX_FLASH_BUSWIDTH
  907. + int "Buswidth of NOR flash in bytes"
  908. + default "2"
  909. + help
  910. + Width in bytes of the NOR Flash bus (1, 2 or 4). Is usually 2.
  911. +
  912. +config ETRAX_NANDFLASH_BUSWIDTH
  913. + int "Buswidth of NAND flash in bytes"
  914. + default "1"
  915. + help
  916. + Width in bytes of the NAND flash (1 or 2).
  917. +
  918. +config ETRAX_FLASH1_SIZE
  919. + int "FLASH1 size (dec, in MB. 0 = Unknown)"
  920. + default "0"
  921. +
  922. +choice
  923. + prompt "Product debug-port"
  924. + default ETRAX_DEBUG_PORT0
  925. +
  926. +config ETRAX_DEBUG_PORT0
  927. + bool "Serial-0"
  928. + help
  929. + Choose a serial port for the ETRAX debug console. Default to
  930. + port 0.
  931. +
  932. +config ETRAX_DEBUG_PORT1
  933. + bool "Serial-1"
  934. + help
  935. + Use serial port 1 for the console.
  936. +
  937. +config ETRAX_DEBUG_PORT2
  938. + bool "Serial-2"
  939. + help
  940. + Use serial port 2 for the console.
  941. +
  942. +config ETRAX_DEBUG_PORT3
  943. + bool "Serial-3"
  944. + help
  945. + Use serial port 3 for the console.
  946. +
  947. +config ETRAX_DEBUG_PORT_NULL
  948. + bool "disabled"
  949. + help
  950. + Disable serial-port debugging.
  951. +
  952. +endchoice
  953. +
  954. +choice
  955. + prompt "Kernel GDB port"
  956. + depends on ETRAX_KGDB
  957. + default ETRAX_KGDB_PORT0
  958. + help
  959. + Choose a serial port for kernel debugging. NOTE: This port should
  960. + not be enabled under Drivers for built-in interfaces (as it has its
  961. + own initialization code) and should not be the same as the debug port.
  962. +
  963. +config ETRAX_KGDB_PORT0
  964. + bool "Serial-0"
  965. + help
  966. + Use serial port 0 for kernel debugging.
  967. +
  968. +config ETRAX_KGDB_PORT1
  969. + bool "Serial-1"
  970. + help
  971. + Use serial port 1 for kernel debugging.
  972. +
  973. +config ETRAX_KGDB_PORT2
  974. + bool "Serial-2"
  975. + help
  976. + Use serial port 2 for kernel debugging.
  977. +
  978. +config ETRAX_KGDB_PORT3
  979. + bool "Serial-3"
  980. + help
  981. + Use serial port 3 for kernel debugging.
  982. +
  983. +endchoice
  984. +
  985. +source arch/cris/arch-v10/Kconfig
  986. +source arch/cris/arch-v32/Kconfig
  987. +
  988. +endmenu
  989. +
  990. +source "net/Kconfig"
  991. +
  992. +# bring in ETRAX built-in drivers
  993. +menu "Drivers for built-in interfaces"
  994. +source arch/cris/arch-v10/drivers/Kconfig
  995. +source arch/cris/arch-v32/drivers/Kconfig
  996. +
  997. +config ETRAX_AXISFLASHMAP
  998. + bool "Axis flash-map support"
  999. + select MTD
  1000. + select MTD_CFI
  1001. + select MTD_CFI_AMDSTD
  1002. + select MTD_JEDECPROBE if ETRAX_ARCH_V32
  1003. + select MTD_CHAR
  1004. + select MTD_BLOCK
  1005. + select MTD_PARTITIONS
  1006. + select MTD_COMPLEX_MAPPINGS
  1007. + help
  1008. + This option enables MTD mapping of flash devices. Needed to use
  1009. + flash memories. If unsure, say Y.
  1010. +
  1011. +config ETRAX_RTC
  1012. + bool "Real Time Clock support"
  1013. + depends on ETRAX_I2C
  1014. + help
  1015. + Enables drivers for the Real-Time Clock battery-backed chips on
  1016. + some products. The kernel reads the time when booting, and
  1017. + the date can be set using ioctl(fd, RTC_SET_TIME, &rt) with rt a
  1018. + rtc_time struct (see <file:include/asm-cris/rtc.h>) on the /dev/rtc
  1019. + device. You can check the time with cat /proc/rtc, but
  1020. + normal time reading should be done using libc function time and
  1021. + friends.
  1022. +
  1023. +choice
  1024. + prompt "RTC chip"
  1025. + depends on ETRAX_RTC
  1026. + default ETRAX_DS1302
  1027. +
  1028. +config ETRAX_DS1302
  1029. + depends on ETRAX_ARCH_V10
  1030. + bool "DS1302"
  1031. + help
  1032. + Enables the driver for the DS1302 Real-Time Clock battery-backed
  1033. + chip on some products.
  1034. +
  1035. +config ETRAX_PCF8563
  1036. + bool "PCF8563"
  1037. + help
  1038. + Enables the driver for the PCF8563 Real-Time Clock battery-backed
  1039. + chip on some products.
  1040. +
  1041. +endchoice
  1042. +
  1043. +config ETRAX_SYNCHRONOUS_SERIAL
  1044. + bool "Synchronous serial-port support"
  1045. + help
  1046. + Select this to enable the synchronous serial port driver.
  1047. +
  1048. +config ETRAX_SYNCHRONOUS_SERIAL_PORT0
  1049. + bool "Synchronous serial port 0 enabled"
  1050. + depends on ETRAX_SYNCHRONOUS_SERIAL
  1051. + help
  1052. + Enabled synchronous serial port 0.
  1053. +
  1054. +config ETRAX_SYNCHRONOUS_SERIAL0_DMA
  1055. + bool "Enable DMA on synchronous serial port 0."
  1056. + depends on ETRAX_SYNCHRONOUS_SERIAL_PORT0
  1057. + help
  1058. + A synchronous serial port can run in manual or DMA mode.
  1059. + Selecting this option will make it run in DMA mode.
  1060. +
  1061. +config ETRAX_SYNCHRONOUS_SERIAL_PORT1
  1062. + bool "Synchronous serial port 1 enabled"
  1063. + depends on ETRAX_SYNCHRONOUS_SERIAL && (ETRAXFS || ETRAX_ARCH_V10)
  1064. + help
  1065. + Enabled synchronous serial port 1.
  1066. +
  1067. +config ETRAX_SYNCHRONOUS_SERIAL1_DMA
  1068. + bool "Enable DMA on synchronous serial port 1."
  1069. + depends on ETRAX_SYNCHRONOUS_SERIAL_PORT1
  1070. + help
  1071. + A synchronous serial port can run in manual or DMA mode.
  1072. + Selecting this option will make it run in DMA mode.
  1073. +
  1074. +choice
  1075. + prompt "Network LED behavior"
  1076. + depends on ETRAX_ETHERNET
  1077. + default ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY
  1078. +
  1079. +config ETRAX_NETWORK_LED_ON_WHEN_LINK
  1080. + bool "LED_on_when_link"
  1081. + help
  1082. + Selecting LED_on_when_link will light the LED when there is a
  1083. + connection and will flash off when there is activity.
  1084. +
  1085. + Selecting LED_on_when_activity will light the LED only when
  1086. + there is activity.
  1087. +
  1088. + This setting will also affect the behaviour of other activity LEDs
  1089. + e.g. Bluetooth.
  1090. +
  1091. +config ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY
  1092. + bool "LED_on_when_activity"
  1093. + help
  1094. + Selecting LED_on_when_link will light the LED when there is a
  1095. + connection and will flash off when there is activity.
  1096. +
  1097. + Selecting LED_on_when_activity will light the LED only when
  1098. + there is activity.
  1099. +
  1100. + This setting will also affect the behaviour of other activity LEDs
  1101. + e.g. Bluetooth.
  1102. +
  1103. +endchoice
  1104. +
  1105. +choice
  1106. + prompt "Ser0 DMA out channel"
  1107. + depends on ETRAX_SERIAL_PORT0
  1108. + default ETRAX_SERIAL_PORT0_DMA6_OUT if ETRAX_ARCH_V32
  1109. + default ETRAX_SERIAL_PORT0_NO_DMA_OUT if ETRAX_ARCH_V10
  1110. +
  1111. +config ETRAX_SERIAL_PORT0_NO_DMA_OUT
  1112. + bool "Ser0 uses no DMA for output"
  1113. + help
  1114. + Do not use DMA for ser0 output.
  1115. +
  1116. +config ETRAX_SERIAL_PORT0_DMA6_OUT
  1117. + bool "Ser0 uses DMA6 for output"
  1118. + depends on ETRAXFS
  1119. + help
  1120. + Enables the DMA6 output channel for ser0 (ttyS0).
  1121. + If you do not enable DMA, an interrupt for each character will be
  1122. + used when transmitting data.
  1123. + Normally you want to use DMA, unless you use the DMA channel for
  1124. + something else.
  1125. +
  1126. +config ETRAX_SERIAL_PORT0_DMA0_OUT
  1127. + bool "Ser0 uses DMA0 for output"
  1128. + depends on CRIS_MACH_ARTPEC3
  1129. + help
  1130. + Enables the DMA0 output channel for ser0 (ttyS0).
  1131. + If you do not enable DMA, an interrupt for each character will be
  1132. + used when transmitting data.
  1133. + Normally you want to use DMA, unless you use the DMA channel for
  1134. + something else.
  1135. +
  1136. +endchoice
  1137. +
  1138. +choice
  1139. + prompt "Ser0 DMA in channel "
  1140. + depends on ETRAX_SERIAL_PORT0
  1141. + default ETRAX_SERIAL_PORT0_NO_DMA_IN if ETRAX_ARCH_V32
  1142. + default ETRAX_SERIAL_PORT0_DMA7_IN if ETRAX_ARCH_V10
  1143. + help
  1144. + What DMA channel to use for ser0.
  1145. +
  1146. +config ETRAX_SERIAL_PORT0_NO_DMA_IN
  1147. + bool "Ser0 uses no DMA for input"
  1148. + help
  1149. + Do not use DMA for ser0 input.
  1150. +
  1151. +config ETRAX_SERIAL_PORT0_DMA7_IN
  1152. + bool "Ser0 uses DMA7 for input"
  1153. + depends on ETRAXFS
  1154. + help
  1155. + Enables the DMA7 input channel for ser0 (ttyS0).
  1156. + If you do not enable DMA, an interrupt for each character will be
  1157. + used when receiving data.
  1158. + Normally you want to use DMA, unless you use the DMA channel for
  1159. + something else.
  1160. +
  1161. +config ETRAX_SERIAL_PORT0_DMA1_IN
  1162. + bool "Ser0 uses DMA1 for input"
  1163. + depends on CRIS_MACH_ARTPEC3
  1164. + help
  1165. + Enables the DMA1 input channel for ser0 (ttyS0).
  1166. + If you do not enable DMA, an interrupt for each character will be
  1167. + used when receiving data.
  1168. + Normally you want to use DMA, unless you use the DMA channel for
  1169. + something else.
  1170. +
  1171. +endchoice
  1172. +
  1173. +choice
  1174. + prompt "Ser1 DMA in channel "
  1175. + depends on ETRAX_SERIAL_PORT1
  1176. + default ETRAX_SERIAL_PORT1_NO_DMA_IN if ETRAX_ARCH_V32
  1177. + default ETRAX_SERIAL_PORT1_DMA9_IN if ETRAX_ARCH_V10
  1178. + help
  1179. + What DMA channel to use for ser1.
  1180. +
  1181. +config ETRAX_SERIAL_PORT1_NO_DMA_IN
  1182. + bool "Ser1 uses no DMA for input"
  1183. + help
  1184. + Do not use DMA for ser1 input.
  1185. +
  1186. +config ETRAX_SERIAL_PORT1_DMA5_IN
  1187. + bool "Ser1 uses DMA5 for input"
  1188. + depends on ETRAX_ARCH_V32
  1189. + help
  1190. + Enables the DMA5 input channel for ser1 (ttyS1).
  1191. + If you do not enable DMA, an interrupt for each character will be
  1192. + used when receiving data.
  1193. + Normally you want this on, unless you use the DMA channel for
  1194. + something else.
  1195. +
  1196. +config ETRAX_SERIAL_PORT1_DMA9_IN
  1197. + depends on ETRAX_ARCH_V10
  1198. + bool "Ser1 uses DMA9 for input"
  1199. +
  1200. +endchoice
  1201. +
  1202. +
  1203. +choice
  1204. + prompt "Ser1 DMA out channel"
  1205. + depends on ETRAX_SERIAL_PORT1
  1206. + default ETRAX_SERIAL_PORT1_NO_DMA_OUT if ETRAX_ARCH_V32
  1207. + default ETRAX_SERIAL_PORT1_DMA8_OUT if ETRAX_ARCH_V10
  1208. + help
  1209. + What DMA channel to use for ser1.
  1210. +
  1211. +config ETRAX_SERIAL_PORT1_NO_DMA_OUT
  1212. + bool "Ser1 uses no DMA for output"
  1213. + help
  1214. + Do not use DMA for ser1 output.
  1215. +
  1216. +config ETRAX_SERIAL_PORT1_DMA8_OUT
  1217. + depends on ETRAX_ARCH_V10
  1218. + bool "Ser1 uses DMA8 for output"
  1219. +
  1220. +config ETRAX_SERIAL_PORT1_DMA4_OUT
  1221. + depends on ETRAX_ARCH_V32
  1222. + bool "Ser1 uses DMA4 for output"
  1223. + help
  1224. + Enables the DMA4 output channel for ser1 (ttyS1).
  1225. + If you do not enable DMA, an interrupt for each character will be
  1226. + used when transmitting data.
  1227. + Normally you want this on, unless you use the DMA channel for
  1228. + something else.
  1229. +
  1230. +endchoice
  1231. +
  1232. +choice
  1233. + prompt "Ser2 DMA out channel"
  1234. + depends on ETRAX_SERIAL_PORT2
  1235. + default ETRAX_SERIAL_PORT2_NO_DMA_OUT if ETRAX_ARCH_V32
  1236. + default ETRAX_SERIAL_PORT2_DMA2_OUT if ETRAX_ARCH_V10
  1237. +
  1238. +config ETRAX_SERIAL_PORT2_NO_DMA_OUT
  1239. + bool "Ser2 uses no DMA for output"
  1240. + help
  1241. + Do not use DMA for ser2 output.
  1242. +
  1243. +config ETRAX_SERIAL_PORT2_DMA2_OUT
  1244. + bool "Ser2 uses DMA2 for output"
  1245. + depends on ETRAXFS || ETRAX_ARCH_V10
  1246. + help
  1247. + Enables the DMA2 output channel for ser2 (ttyS2).
  1248. + If you do not enable DMA, an interrupt for each character will be
  1249. + used when transmitting data.
  1250. + Normally you want to use DMA, unless you use the DMA channel for
  1251. + something else.
  1252. +
  1253. +config ETRAX_SERIAL_PORT2_DMA6_OUT
  1254. + bool "Ser2 uses DMA6 for output"
  1255. + depends on CRIS_MACH_ARTPEC3
  1256. + help
  1257. + Enables the DMA6 output channel for ser2 (ttyS2).
  1258. + If you do not enable DMA, an interrupt for each character will be
  1259. + used when transmitting data.
  1260. + Normally you want to use DMA, unless you use the DMA channel for
  1261. + something else.
  1262. +
  1263. +endchoice
  1264. +
  1265. +choice
  1266. + prompt "Ser2 DMA in channel"
  1267. + depends on ETRAX_SERIAL_PORT2
  1268. + default ETRAX_SERIAL_PORT2_NO_DMA_IN if ETRAX_ARCH_V32
  1269. + default ETRAX_SERIAL_PORT2_DMA3_IN if ETRAX_ARCH_V10
  1270. + help
  1271. + What DMA channel to use for ser2.
  1272. +
  1273. +config ETRAX_SERIAL_PORT2_NO_DMA_IN
  1274. + bool "Ser2 uses no DMA for input"
  1275. + help
  1276. + Do not use DMA for ser2 input.
  1277. +
  1278. +config ETRAX_SERIAL_PORT2_DMA3_IN
  1279. + bool "Ser2 uses DMA3 for input"
  1280. + depends on ETRAXFS || ETRAX_ARCH_V10
  1281. + help
  1282. + Enables the DMA3 input channel for ser2 (ttyS2).
  1283. + If you do not enable DMA, an interrupt for each character will be
  1284. + used when receiving data.
  1285. + Normally you want to use DMA, unless you use the DMA channel for
  1286. + something else.
  1287. +
  1288. +config ETRAX_SERIAL_PORT2_DMA7_IN
  1289. + bool "Ser2 uses DMA7 for input"
  1290. + depends on CRIS_MACH_ARTPEC3
  1291. + help
  1292. + Enables the DMA7 input channel for ser2 (ttyS2).
  1293. + If you do not enable DMA, an interrupt for each character will be
  1294. + used when receiving data.
  1295. + Normally you want to use DMA, unless you use the DMA channel for
  1296. + something else.
  1297. +
  1298. +endchoice
  1299. +
  1300. +choice
  1301. + prompt "Ser3 DMA in channel"
  1302. + depends on ETRAX_SERIAL_PORT3
  1303. + default ETRAX_SERIAL_PORT3_NO_DMA_IN if ETRAX_ARCH_V32
  1304. + default ETRAX_SERIAL_PORT3_DMA5_IN if ETRAX_ARCH_V10
  1305. + help
  1306. + What DMA channel to use for ser3.
  1307. +
  1308. +config ETRAX_SERIAL_PORT3_NO_DMA_IN
  1309. + bool "Ser3 uses no DMA for input"
  1310. + help
  1311. + Do not use DMA for ser3 input.
  1312. +
  1313. +config ETRAX_SERIAL_PORT3_DMA5_IN
  1314. + depends on ETRAX_ARCH_V10
  1315. + bool "DMA 5"
  1316. +
  1317. +config ETRAX_SERIAL_PORT3_DMA9_IN
  1318. + bool "Ser3 uses DMA9 for input"
  1319. + depends on ETRAXFS
  1320. + help
  1321. + Enables the DMA9 input channel for ser3 (ttyS3).
  1322. + If you do not enable DMA, an interrupt for each character will be
  1323. + used when receiving data.
  1324. + Normally you want to use DMA, unless you use the DMA channel for
  1325. + something else.
  1326. +
  1327. +config ETRAX_SERIAL_PORT3_DMA3_IN
  1328. + bool "Ser3 uses DMA3 for input"
  1329. + depends on CRIS_MACH_ARTPEC3
  1330. + help
  1331. + Enables the DMA3 input channel for ser3 (ttyS3).
  1332. + If you do not enable DMA, an interrupt for each character will be
  1333. + used when receiving data.
  1334. + Normally you want to use DMA, unless you use the DMA channel for
  1335. + something else.
  1336. +
  1337. +endchoice
  1338. +
  1339. +choice
  1340. + prompt "Ser3 DMA out channel"
  1341. + depends on ETRAX_SERIAL_PORT3
  1342. + default ETRAX_SERIAL_PORT3_NO_DMA_OUT if ETRAX_ARCH_V32
  1343. + default ETRAX_SERIAL_PORT3_DMA4_OUT if ETRAX_ARCH_V10
  1344. +
  1345. +config ETRAX_SERIAL_PORT3_NO_DMA_OUT
  1346. + bool "Ser3 uses no DMA for output"
  1347. + help
  1348. + Do not use DMA for ser3 output.
  1349. +
  1350. +config ETRAX_SERIAL_PORT3_DMA4_OUT
  1351. + depends on ETRAX_ARCH_V10
  1352. + bool "DMA 4"
  1353. +
  1354. +config ETRAX_SERIAL_PORT3_DMA8_OUT
  1355. + bool "Ser3 uses DMA8 for output"
  1356. + depends on ETRAXFS
  1357. + help
  1358. + Enables the DMA8 output channel for ser3 (ttyS3).
  1359. + If you do not enable DMA, an interrupt for each character will be
  1360. + used when transmitting data.
  1361. + Normally you want to use DMA, unless you use the DMA channel for
  1362. + something else.
  1363. +
  1364. +config ETRAX_SERIAL_PORT3_DMA2_OUT
  1365. + bool "Ser3 uses DMA2 for output"
  1366. + depends on CRIS_MACH_ARTPEC3
  1367. + help
  1368. + Enables the DMA2 output channel for ser3 (ttyS3).
  1369. + If you do not enable DMA, an interrupt for each character will be
  1370. + used when transmitting data.
  1371. + Normally you want to use DMA, unless you use the DMA channel for
  1372. + something else.
  1373. +
  1374. +endchoice
  1375. +
  1376. +endmenu
  1377. +
  1378. +source "drivers/base/Kconfig"
  1379. +
  1380. +# standard linux drivers
  1381. +source "drivers/mtd/Kconfig"
  1382. +
  1383. +source "drivers/parport/Kconfig"
  1384. +
  1385. +source "drivers/pnp/Kconfig"
  1386. +
  1387. +source "drivers/block/Kconfig"
  1388. +
  1389. +source "drivers/ide/Kconfig"
  1390. +
  1391. +source "drivers/net/Kconfig"
  1392. +
  1393. +source "drivers/i2c/Kconfig"
  1394. +
  1395. +source "drivers/rtc/Kconfig"
  1396. +
  1397. +#
  1398. +# input before char - char/joystick depends on it. As does USB.
  1399. +#
  1400. +source "drivers/input/Kconfig"
  1401. +
  1402. +source "drivers/char/Kconfig"
  1403. +
  1404. +source "fs/Kconfig"
  1405. +
  1406. +source "drivers/usb/Kconfig"
  1407. +
  1408. +source "drivers/uwb/Kconfig"
  1409. +
  1410. +source "drivers/staging/Kconfig"
  1411. +
  1412. +source "arch/cris/Kconfig.debug"
  1413. +
  1414. +source "security/Kconfig"
  1415. +
  1416. +source "crypto/Kconfig"
  1417. +
  1418. +source "lib/Kconfig"
  1419. diff -Nur linux-2.6.39.orig/arch/cris/Makefile linux-2.6.39/arch/cris/Makefile
  1420. --- linux-2.6.39.orig/arch/cris/Makefile 2011-05-19 06:06:34.000000000 +0200
  1421. +++ linux-2.6.39/arch/cris/Makefile 2011-07-28 16:16:36.883415879 +0200
  1422. @@ -40,10 +40,10 @@
  1423. LD = $(CROSS_COMPILE)ld -mcrislinux
  1424. -OBJCOPYFLAGS := -O binary -R .note -R .comment -S
  1425. +OBJCOPYFLAGS := -O binary -R .bss -R .note -R .note.gnu.build-id -R .comment -S
  1426. KBUILD_AFLAGS += -mlinux -march=$(arch-y) $(inc)
  1427. -KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe $(inc)
  1428. +KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe -fno-peephole2 $(inc)
  1429. KBUILD_CPPFLAGS += $(inc)
  1430. ifdef CONFIG_FRAME_POINTER
  1431. diff -Nur linux-2.6.39.orig/arch/cris/mm/init.c linux-2.6.39/arch/cris/mm/init.c
  1432. --- linux-2.6.39.orig/arch/cris/mm/init.c 2011-05-19 06:06:34.000000000 +0200
  1433. +++ linux-2.6.39/arch/cris/mm/init.c 2011-07-28 16:16:37.013424379 +0200
  1434. @@ -16,6 +16,7 @@
  1435. DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
  1436. unsigned long empty_zero_page;
  1437. +EXPORT_SYMBOL(empty_zero_page);
  1438. extern char _stext, _edata, _etext; /* From linkerscript */
  1439. extern char __init_begin, __init_end;
  1440. @@ -81,3 +82,10 @@
  1441. printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n",
  1442. (unsigned long)((&__init_end - &__init_begin) >> 10));
  1443. }
  1444. +
  1445. +#ifdef CONFIG_BLK_DEV_INITRD
  1446. +void free_initrd_mem(unsigned long start, unsigned long end)
  1447. +{
  1448. + return;
  1449. +}
  1450. +#endif
  1451. diff -Nur linux-2.6.39.orig/drivers/net/cris/eth_v10.c linux-2.6.39/drivers/net/cris/eth_v10.c
  1452. --- linux-2.6.39.orig/drivers/net/cris/eth_v10.c 2011-05-19 06:06:34.000000000 +0200
  1453. +++ linux-2.6.39/drivers/net/cris/eth_v10.c 2011-07-28 16:16:37.184155914 +0200
  1454. @@ -1714,7 +1714,7 @@
  1455. static void
  1456. e100_netpoll(struct net_device* netdev)
  1457. {
  1458. - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
  1459. + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
  1460. }
  1461. #endif
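
The netpoll fix above is needed because, by 2.6.39, interrupt handlers take only the IRQ number and the dev_id cookie; the old third pt_regs argument is gone from the prototype (visible in the original file copied below). A minimal sketch of the resulting calling convention, with the handler body stubbed out:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    /* two-argument handler, as declared in eth_v10.c */
    static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id)
    {
            /* the real handler drains the rx/tx DMA rings; elided here */
            return IRQ_HANDLED;
    }

    static void e100_netpoll(struct net_device *netdev)
    {
            /* NETWORK_DMA_TX_IRQ_NBR comes from the CRIS arch headers */
            e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
    }
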
  1462. diff -Nur linux-2.6.39.orig/drivers/net/cris/eth_v10.c.orig linux-2.6.39/drivers/net/cris/eth_v10.c.orig
  1463. --- linux-2.6.39.orig/drivers/net/cris/eth_v10.c.orig 1970-01-01 01:00:00.000000000 +0100
  1464. +++ linux-2.6.39/drivers/net/cris/eth_v10.c.orig 2011-05-19 06:06:34.000000000 +0200
  1465. @@ -0,0 +1,1749 @@
  1466. +/*
  1467. + * e100net.c: A network driver for the ETRAX 100LX network controller.
  1468. + *
  1469. + * Copyright (c) 1998-2002 Axis Communications AB.
  1470. + *
  1471. + * The outline of this driver comes from skeleton.c.
  1472. + *
  1473. + */
  1474. +
  1475. +
  1476. +#include <linux/module.h>
  1477. +
  1478. +#include <linux/kernel.h>
  1479. +#include <linux/delay.h>
  1480. +#include <linux/types.h>
  1481. +#include <linux/fcntl.h>
  1482. +#include <linux/interrupt.h>
  1483. +#include <linux/ptrace.h>
  1484. +#include <linux/ioport.h>
  1485. +#include <linux/in.h>
  1486. +#include <linux/string.h>
  1487. +#include <linux/spinlock.h>
  1488. +#include <linux/errno.h>
  1489. +#include <linux/init.h>
  1490. +#include <linux/bitops.h>
  1491. +
  1492. +#include <linux/if.h>
  1493. +#include <linux/mii.h>
  1494. +#include <linux/netdevice.h>
  1495. +#include <linux/etherdevice.h>
  1496. +#include <linux/skbuff.h>
  1497. +#include <linux/ethtool.h>
  1498. +
  1499. +#include <arch/svinto.h>/* DMA and register descriptions */
  1500. +#include <asm/io.h> /* CRIS_LED_* I/O functions */
  1501. +#include <asm/irq.h>
  1502. +#include <asm/dma.h>
  1503. +#include <asm/system.h>
  1504. +#include <asm/ethernet.h>
  1505. +#include <asm/cache.h>
  1506. +#include <arch/io_interface_mux.h>
  1507. +
  1508. +//#define ETHDEBUG
  1509. +#define D(x)
  1510. +
  1511. +/*
  1512. + * The name of the card. Is used for messages and in the requests for
  1513. + * io regions, irqs and dma channels
  1514. + */
  1515. +
  1516. +static const char* cardname = "ETRAX 100LX built-in ethernet controller";
  1517. +
  1518. +/* A default ethernet address. Highlevel SW will set the real one later */
  1519. +
  1520. +static struct sockaddr default_mac = {
  1521. + 0,
  1522. + { 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
  1523. +};
  1524. +
  1525. +/* Information that need to be kept for each board. */
  1526. +struct net_local {
  1527. + struct mii_if_info mii_if;
  1528. +
  1529. + /* Tx control lock. This protects the transmit buffer ring
  1530. + * state along with the "tx full" state of the driver. This
  1531. + * means all netif_queue flow control actions are protected
  1532. + * by this lock as well.
  1533. + */
  1534. + spinlock_t lock;
  1535. +
  1536. + spinlock_t led_lock; /* Protect LED state */
  1537. + spinlock_t transceiver_lock; /* Protect transceiver state. */
  1538. +};
  1539. +
  1540. +typedef struct etrax_eth_descr
  1541. +{
  1542. + etrax_dma_descr descr;
  1543. + struct sk_buff* skb;
  1544. +} etrax_eth_descr;
  1545. +
  1546. +/* Some transceivers requires special handling */
  1547. +struct transceiver_ops
  1548. +{
  1549. + unsigned int oui;
  1550. + void (*check_speed)(struct net_device* dev);
  1551. + void (*check_duplex)(struct net_device* dev);
  1552. +};
  1553. +
  1554. +/* Duplex settings */
  1555. +enum duplex
  1556. +{
  1557. + half,
  1558. + full,
  1559. + autoneg
  1560. +};
  1561. +
  1562. +/* Dma descriptors etc. */
  1563. +
  1564. +#define MAX_MEDIA_DATA_SIZE 1522
  1565. +
  1566. +#define MIN_PACKET_LEN 46
  1567. +#define ETHER_HEAD_LEN 14
  1568. +
  1569. +/*
  1570. +** MDIO constants.
  1571. +*/
  1572. +#define MDIO_START 0x1
  1573. +#define MDIO_READ 0x2
  1574. +#define MDIO_WRITE 0x1
  1575. +#define MDIO_PREAMBLE 0xfffffffful
  1576. +
  1577. +/* Broadcom specific */
  1578. +#define MDIO_AUX_CTRL_STATUS_REG 0x18
  1579. +#define MDIO_BC_FULL_DUPLEX_IND 0x1
  1580. +#define MDIO_BC_SPEED 0x2
  1581. +
  1582. +/* TDK specific */
  1583. +#define MDIO_TDK_DIAGNOSTIC_REG 18
  1584. +#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
  1585. +#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
  1586. +
  1587. +/*Intel LXT972A specific*/
  1588. +#define MDIO_INT_STATUS_REG_2 0x0011
  1589. +#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
  1590. +#define MDIO_INT_SPEED (1 << 14)
  1591. +
  1592. +/* Network flash constants */
  1593. +#define NET_FLASH_TIME (HZ/50) /* 20 ms */
  1594. +#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
  1595. +#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 s */
  1596. +#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 s */
  1597. +
  1598. +#define NO_NETWORK_ACTIVITY 0
  1599. +#define NETWORK_ACTIVITY 1
  1600. +
  1601. +#define NBR_OF_RX_DESC 32
  1602. +#define NBR_OF_TX_DESC 16
  1603. +
  1604. +/* Large packets are sent directly to upper layers while small packets are */
  1605. +/* copied (to reduce memory waste). The following constant decides the breakpoint */
  1606. +#define RX_COPYBREAK 256
  1607. +
  1608. +/* Due to a chip bug we need to flush the cache when descriptors are returned */
  1609. +/* to the DMA. To decrease performance impact we return descriptors in chunks. */
  1610. +/* The following constant determines the number of descriptors to return. */
  1611. +#define RX_QUEUE_THRESHOLD NBR_OF_RX_DESC/2
  1612. +
  1613. +#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
  1614. +
  1615. +/* Define some macros to access ETRAX 100 registers */
  1616. +#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
  1617. + IO_FIELD_(reg##_, field##_, val)
  1618. +#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
  1619. + IO_STATE_(reg##_, field##_, _##val)
  1620. +
  1621. +static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
  1622. + to be processed */
  1623. +static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
  1624. +
  1625. +static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
  1626. +
  1627. +static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
  1628. +static etrax_eth_descr* myLastTxDesc; /* End of send queue */
  1629. +static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
  1630. +static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
  1631. +
  1632. +static unsigned int network_rec_config_shadow = 0;
  1633. +
  1634. +static unsigned int network_tr_ctrl_shadow = 0;
  1635. +
  1636. +/* Network speed indication. */
  1637. +static DEFINE_TIMER(speed_timer, NULL, 0, 0);
  1638. +static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
  1639. +static int current_speed; /* Speed read from transceiver */
  1640. +static int current_speed_selection; /* Speed selected by user */
  1641. +static unsigned long led_next_time;
  1642. +static int led_active;
  1643. +static int rx_queue_len;
  1644. +
  1645. +/* Duplex */
  1646. +static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
  1647. +static int full_duplex;
  1648. +static enum duplex current_duplex;
  1649. +
  1650. +/* Index to functions, as function prototypes. */
  1651. +
  1652. +static int etrax_ethernet_init(void);
  1653. +
  1654. +static int e100_open(struct net_device *dev);
  1655. +static int e100_set_mac_address(struct net_device *dev, void *addr);
  1656. +static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
  1657. +static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id);
  1658. +static irqreturn_t e100nw_interrupt(int irq, void *dev_id);
  1659. +static void e100_rx(struct net_device *dev);
  1660. +static int e100_close(struct net_device *dev);
  1661. +static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
  1662. +static int e100_set_config(struct net_device* dev, struct ifmap* map);
  1663. +static void e100_tx_timeout(struct net_device *dev);
  1664. +static struct net_device_stats *e100_get_stats(struct net_device *dev);
  1665. +static void set_multicast_list(struct net_device *dev);
  1666. +static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
  1667. +static void update_rx_stats(struct net_device_stats *);
  1668. +static void update_tx_stats(struct net_device_stats *);
  1669. +static int e100_probe_transceiver(struct net_device* dev);
  1670. +
  1671. +static void e100_check_speed(unsigned long priv);
  1672. +static void e100_set_speed(struct net_device* dev, unsigned long speed);
  1673. +static void e100_check_duplex(unsigned long priv);
  1674. +static void e100_set_duplex(struct net_device* dev, enum duplex);
  1675. +static void e100_negotiate(struct net_device* dev);
  1676. +
  1677. +static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
  1678. +static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);
  1679. +
  1680. +static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
  1681. +static void e100_send_mdio_bit(unsigned char bit);
  1682. +static unsigned char e100_receive_mdio_bit(void);
  1683. +static void e100_reset_transceiver(struct net_device* net);
  1684. +
  1685. +static void e100_clear_network_leds(unsigned long dummy);
  1686. +static void e100_set_network_leds(int active);
  1687. +
  1688. +static const struct ethtool_ops e100_ethtool_ops;
  1689. +#if defined(CONFIG_ETRAX_NO_PHY)
  1690. +static void dummy_check_speed(struct net_device* dev);
  1691. +static void dummy_check_duplex(struct net_device* dev);
  1692. +#else
  1693. +static void broadcom_check_speed(struct net_device* dev);
  1694. +static void broadcom_check_duplex(struct net_device* dev);
  1695. +static void tdk_check_speed(struct net_device* dev);
  1696. +static void tdk_check_duplex(struct net_device* dev);
  1697. +static void intel_check_speed(struct net_device* dev);
  1698. +static void intel_check_duplex(struct net_device* dev);
  1699. +static void generic_check_speed(struct net_device* dev);
  1700. +static void generic_check_duplex(struct net_device* dev);
  1701. +#endif
  1702. +#ifdef CONFIG_NET_POLL_CONTROLLER
  1703. +static void e100_netpoll(struct net_device* dev);
  1704. +#endif
  1705. +
  1706. +static int autoneg_normal = 1;
  1707. +
  1708. +struct transceiver_ops transceivers[] =
  1709. +{
  1710. +#if defined(CONFIG_ETRAX_NO_PHY)
  1711. + {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
  1712. +#else
  1713. + {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
  1714. + {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
  1715. + {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
  1716. + {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
  1717. + {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
  1718. +#endif
  1719. +};
  1720. +
  1721. +struct transceiver_ops* transceiver = &transceivers[0];
  1722. +
  1723. +static const struct net_device_ops e100_netdev_ops = {
  1724. + .ndo_open = e100_open,
  1725. + .ndo_stop = e100_close,
  1726. + .ndo_start_xmit = e100_send_packet,
  1727. + .ndo_tx_timeout = e100_tx_timeout,
  1728. + .ndo_get_stats = e100_get_stats,
  1729. + .ndo_set_multicast_list = set_multicast_list,
  1730. + .ndo_do_ioctl = e100_ioctl,
  1731. + .ndo_set_mac_address = e100_set_mac_address,
  1732. + .ndo_validate_addr = eth_validate_addr,
  1733. + .ndo_change_mtu = eth_change_mtu,
  1734. + .ndo_set_config = e100_set_config,
  1735. +#ifdef CONFIG_NET_POLL_CONTROLLER
  1736. + .ndo_poll_controller = e100_netpoll,
  1737. +#endif
  1738. +};
  1739. +
  1740. +#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
  1741. +
  1742. +/*
  1743. + * Check for a network adaptor of this type, and return '0' if one exists.
  1744. + * If dev->base_addr == 0, probe all likely locations.
  1745. + * If dev->base_addr == 1, always return failure.
  1746. + * If dev->base_addr == 2, allocate space for the device and return success
  1747. + * (detachable devices only).
  1748. + */
  1749. +
  1750. +static int __init
  1751. +etrax_ethernet_init(void)
  1752. +{
  1753. + struct net_device *dev;
  1754. + struct net_local* np;
  1755. + int i, err;
  1756. +
  1757. + printk(KERN_INFO
  1758. + "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
  1759. +
  1760. + if (cris_request_io_interface(if_eth, cardname)) {
  1761. + printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
  1762. + return -EBUSY;
  1763. + }
  1764. +
  1765. + dev = alloc_etherdev(sizeof(struct net_local));
  1766. + if (!dev)
  1767. + return -ENOMEM;
  1768. +
  1769. + np = netdev_priv(dev);
  1770. +
  1771. + /* we do our own locking */
  1772. + dev->features |= NETIF_F_LLTX;
  1773. +
  1774. + dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
  1775. +
  1776. + /* now setup our etrax specific stuff */
  1777. +
  1778. + dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
  1779. + dev->dma = NETWORK_RX_DMA_NBR;
  1780. +
  1781. + /* fill in our handlers so the network layer can talk to us in the future */
  1782. +
  1783. + dev->ethtool_ops = &e100_ethtool_ops;
  1784. + dev->netdev_ops = &e100_netdev_ops;
  1785. +
  1786. + spin_lock_init(&np->lock);
  1787. + spin_lock_init(&np->led_lock);
  1788. + spin_lock_init(&np->transceiver_lock);
  1789. +
  1790. + /* Initialise the list of Etrax DMA-descriptors */
  1791. +
  1792. + /* Initialise receive descriptors */
  1793. +
  1794. + for (i = 0; i < NBR_OF_RX_DESC; i++) {
  1795. + /* Allocate two extra cachelines to make sure that buffer used
  1796. + * by DMA does not share cacheline with any other data (to
  1797. + * avoid cache bug)
  1798. + */
  1799. + RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
  1800. + if (!RxDescList[i].skb)
  1801. + return -ENOMEM;
  1802. + RxDescList[i].descr.ctrl = 0;
  1803. + RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
  1804. + RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]);
  1805. + RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
  1806. + RxDescList[i].descr.status = 0;
  1807. + RxDescList[i].descr.hw_len = 0;
  1808. + prepare_rx_descriptor(&RxDescList[i].descr);
  1809. + }
  1810. +
  1811. + RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol;
  1812. + RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]);
  1813. + rx_queue_len = 0;
  1814. +
  1815. + /* Initialize transmit descriptors */
  1816. + for (i = 0; i < NBR_OF_TX_DESC; i++) {
  1817. + TxDescList[i].descr.ctrl = 0;
  1818. + TxDescList[i].descr.sw_len = 0;
  1819. + TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr);
  1820. + TxDescList[i].descr.buf = 0;
  1821. + TxDescList[i].descr.status = 0;
  1822. + TxDescList[i].descr.hw_len = 0;
  1823. + TxDescList[i].skb = 0;
  1824. + }
  1825. +
  1826. + TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol;
  1827. + TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr);
  1828. +
  1829. + /* Initialise initial pointers */
  1830. +
  1831. + myNextRxDesc = &RxDescList[0];
  1832. + myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
  1833. + myFirstTxDesc = &TxDescList[0];
  1834. + myNextTxDesc = &TxDescList[0];
  1835. + myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
  1836. +
  1837. + /* Register device */
  1838. + err = register_netdev(dev);
  1839. + if (err) {
  1840. + free_netdev(dev);
  1841. + return err;
  1842. + }
  1843. +
  1844. + /* set the default MAC address */
  1845. +
  1846. + e100_set_mac_address(dev, &default_mac);
  1847. +
  1848. + /* Initialize speed indicator stuff. */
  1849. +
  1850. + current_speed = 10;
  1851. + current_speed_selection = 0; /* Auto */
  1852. + speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
  1853. + speed_timer.data = (unsigned long)dev;
  1854. + speed_timer.function = e100_check_speed;
  1855. +
  1856. + clear_led_timer.function = e100_clear_network_leds;
  1857. + clear_led_timer.data = (unsigned long)dev;
  1858. +
  1859. + full_duplex = 0;
  1860. + current_duplex = autoneg;
  1861. + duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
  1862. + duplex_timer.data = (unsigned long)dev;
  1863. + duplex_timer.function = e100_check_duplex;
  1864. +
  1865. + /* Initialize mii interface */
  1866. + np->mii_if.phy_id_mask = 0x1f;
  1867. + np->mii_if.reg_num_mask = 0x1f;
  1868. + np->mii_if.dev = dev;
  1869. + np->mii_if.mdio_read = e100_get_mdio_reg;
  1870. + np->mii_if.mdio_write = e100_set_mdio_reg;
  1871. +
  1872. + /* Initialize group address registers to make sure that no */
  1873. + /* unwanted addresses are matched */
  1874. + *R_NETWORK_GA_0 = 0x00000000;
  1875. + *R_NETWORK_GA_1 = 0x00000000;
  1876. +
  1877. + /* Initialize next time the led can flash */
  1878. + led_next_time = jiffies;
  1879. + return 0;
  1880. +}
  1881. +
  1882. +/* set MAC address of the interface. called from the core after a
  1883. + * SIOCSIFADDR ioctl, and from the bootup above.
  1884. + */
  1885. +
  1886. +static int
  1887. +e100_set_mac_address(struct net_device *dev, void *p)
  1888. +{
  1889. + struct net_local *np = netdev_priv(dev);
  1890. + struct sockaddr *addr = p;
  1891. +
  1892. + spin_lock(&np->lock); /* preemption protection */
  1893. +
  1894. + /* remember it */
  1895. +
  1896. + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  1897. +
  1898. + /* Write it to the hardware.
  1899. + * Note the way the address is wrapped:
  1900. + * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
  1901. + * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
  1902. + */
  1903. +
  1904. + *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
  1905. + (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
  1906. + *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
  1907. + *R_NETWORK_SA_2 = 0;
  1908. +
  1909. + /* show it in the log as well */
  1910. +
  1911. + printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);
  1912. +
  1913. + spin_unlock(&np->lock);
  1914. +
  1915. + return 0;
  1916. +}
  1917. +
  1918. +/*
  1919. + * Open/initialize the board. This is called (in the current kernel)
  1920. + * sometime after booting when the 'ifconfig' program is run.
  1921. + *
  1922. + * This routine should set everything up anew at each open, even
  1923. + * registers that "should" only need to be set once at boot, so that
1924. + * there is a non-reboot way to recover if something goes wrong.
  1925. + */
  1926. +
  1927. +static int
  1928. +e100_open(struct net_device *dev)
  1929. +{
  1930. + unsigned long flags;
  1931. +
  1932. + /* enable the MDIO output pin */
  1933. +
  1934. + *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);
  1935. +
  1936. + *R_IRQ_MASK0_CLR =
  1937. + IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
  1938. + IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
  1939. + IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
  1940. +
  1941. + /* clear dma0 and 1 eop and descr irq masks */
  1942. + *R_IRQ_MASK2_CLR =
  1943. + IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
  1944. + IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
  1945. + IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
  1946. + IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
  1947. +
  1948. + /* Reset and wait for the DMA channels */
  1949. +
  1950. + RESET_DMA(NETWORK_TX_DMA_NBR);
  1951. + RESET_DMA(NETWORK_RX_DMA_NBR);
  1952. + WAIT_DMA(NETWORK_TX_DMA_NBR);
  1953. + WAIT_DMA(NETWORK_RX_DMA_NBR);
  1954. +
  1955. + /* Initialise the etrax network controller */
  1956. +
  1957. + /* allocate the irq corresponding to the receiving DMA */
  1958. +
  1959. + if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt,
  1960. + IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) {
  1961. + goto grace_exit0;
  1962. + }
  1963. +
  1964. + /* allocate the irq corresponding to the transmitting DMA */
  1965. +
  1966. + if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
  1967. + cardname, (void *)dev)) {
  1968. + goto grace_exit1;
  1969. + }
  1970. +
  1971. + /* allocate the irq corresponding to the network errors etc */
  1972. +
  1973. + if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
  1974. + cardname, (void *)dev)) {
  1975. + goto grace_exit2;
  1976. + }
  1977. +
  1978. + /*
  1979. + * Always allocate the DMA channels after the IRQ,
  1980. + * and clean up on failure.
  1981. + */
  1982. +
  1983. + if (cris_request_dma(NETWORK_TX_DMA_NBR,
  1984. + cardname,
  1985. + DMA_VERBOSE_ON_ERROR,
  1986. + dma_eth)) {
  1987. + goto grace_exit3;
  1988. + }
  1989. +
  1990. + if (cris_request_dma(NETWORK_RX_DMA_NBR,
  1991. + cardname,
  1992. + DMA_VERBOSE_ON_ERROR,
  1993. + dma_eth)) {
  1994. + goto grace_exit4;
  1995. + }
  1996. +
  1997. + /* give the HW an idea of what MAC address we want */
  1998. +
  1999. + *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
  2000. + (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
  2001. + *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
  2002. + *R_NETWORK_SA_2 = 0;
  2003. +
  2004. +#if 0
  2005. + /* use promiscuous mode for testing */
  2006. + *R_NETWORK_GA_0 = 0xffffffff;
  2007. + *R_NETWORK_GA_1 = 0xffffffff;
  2008. +
  2009. + *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
  2010. +#else
  2011. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
  2012. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
  2013. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
  2014. + SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
  2015. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  2016. +#endif
  2017. +
  2018. + *R_NETWORK_GEN_CONFIG =
  2019. + IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) |
  2020. + IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);
  2021. +
  2022. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
  2023. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
  2024. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
  2025. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
  2026. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
  2027. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
  2028. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
  2029. + *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
  2030. +
  2031. + local_irq_save(flags);
  2032. +
  2033. + /* enable the irq's for ethernet DMA */
  2034. +
  2035. + *R_IRQ_MASK2_SET =
  2036. + IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
  2037. + IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
  2038. +
  2039. + *R_IRQ_MASK0_SET =
  2040. + IO_STATE(R_IRQ_MASK0_SET, overrun, set) |
  2041. + IO_STATE(R_IRQ_MASK0_SET, underrun, set) |
  2042. + IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);
  2043. +
  2044. + /* make sure the irqs are cleared */
  2045. +
  2046. + *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
  2047. + *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
  2048. +
  2049. + /* make sure the rec and transmit error counters are cleared */
  2050. +
  2051. + (void)*R_REC_COUNTERS; /* dummy read */
  2052. + (void)*R_TR_COUNTERS; /* dummy read */
  2053. +
  2054. + /* start the receiving DMA channel so we can receive packets from now on */
  2055. +
  2056. + *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
  2057. + *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);
  2058. +
  2059. + /* Set up transmit DMA channel so it can be restarted later */
  2060. +
  2061. + *R_DMA_CH0_FIRST = 0;
  2062. + *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
  2063. + netif_start_queue(dev);
  2064. +
  2065. + local_irq_restore(flags);
  2066. +
  2067. + /* Probe for transceiver */
  2068. + if (e100_probe_transceiver(dev))
  2069. + goto grace_exit5;
  2070. +
  2071. + /* Start duplex/speed timers */
  2072. + add_timer(&speed_timer);
  2073. + add_timer(&duplex_timer);
  2074. +
2075. + /* We are now ready to accept transmit requests from
2076. + * the queueing layer of the networking stack.
  2077. + */
  2078. + netif_carrier_on(dev);
  2079. +
  2080. + return 0;
  2081. +
  2082. +grace_exit5:
  2083. + cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
  2084. +grace_exit4:
  2085. + cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
  2086. +grace_exit3:
  2087. + free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
  2088. +grace_exit2:
  2089. + free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
  2090. +grace_exit1:
  2091. + free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
  2092. +grace_exit0:
  2093. + return -EAGAIN;
  2094. +}
  2095. +
  2096. +#if defined(CONFIG_ETRAX_NO_PHY)
  2097. +static void
  2098. +dummy_check_speed(struct net_device* dev)
  2099. +{
  2100. + current_speed = 100;
  2101. +}
  2102. +#else
  2103. +static void
  2104. +generic_check_speed(struct net_device* dev)
  2105. +{
  2106. + unsigned long data;
  2107. + struct net_local *np = netdev_priv(dev);
  2108. +
  2109. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
  2110. + if ((data & ADVERTISE_100FULL) ||
  2111. + (data & ADVERTISE_100HALF))
  2112. + current_speed = 100;
  2113. + else
  2114. + current_speed = 10;
  2115. +}
  2116. +
  2117. +static void
  2118. +tdk_check_speed(struct net_device* dev)
  2119. +{
  2120. + unsigned long data;
  2121. + struct net_local *np = netdev_priv(dev);
  2122. +
  2123. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2124. + MDIO_TDK_DIAGNOSTIC_REG);
  2125. + current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
  2126. +}
  2127. +
  2128. +static void
  2129. +broadcom_check_speed(struct net_device* dev)
  2130. +{
  2131. + unsigned long data;
  2132. + struct net_local *np = netdev_priv(dev);
  2133. +
  2134. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2135. + MDIO_AUX_CTRL_STATUS_REG);
  2136. + current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
  2137. +}
  2138. +
  2139. +static void
  2140. +intel_check_speed(struct net_device* dev)
  2141. +{
  2142. + unsigned long data;
  2143. + struct net_local *np = netdev_priv(dev);
  2144. +
  2145. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2146. + MDIO_INT_STATUS_REG_2);
  2147. + current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
  2148. +}
  2149. +#endif
  2150. +static void
  2151. +e100_check_speed(unsigned long priv)
  2152. +{
  2153. + struct net_device* dev = (struct net_device*)priv;
  2154. + struct net_local *np = netdev_priv(dev);
  2155. + static int led_initiated = 0;
  2156. + unsigned long data;
  2157. + int old_speed = current_speed;
  2158. +
  2159. + spin_lock(&np->transceiver_lock);
  2160. +
  2161. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
  2162. + if (!(data & BMSR_LSTATUS)) {
  2163. + current_speed = 0;
  2164. + } else {
  2165. + transceiver->check_speed(dev);
  2166. + }
  2167. +
  2168. + spin_lock(&np->led_lock);
  2169. + if ((old_speed != current_speed) || !led_initiated) {
  2170. + led_initiated = 1;
  2171. + e100_set_network_leds(NO_NETWORK_ACTIVITY);
  2172. + if (current_speed)
  2173. + netif_carrier_on(dev);
  2174. + else
  2175. + netif_carrier_off(dev);
  2176. + }
  2177. + spin_unlock(&np->led_lock);
  2178. +
  2179. + /* Reinitialize the timer. */
  2180. + speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
  2181. + add_timer(&speed_timer);
  2182. +
  2183. + spin_unlock(&np->transceiver_lock);
  2184. +}
  2185. +
  2186. +static void
  2187. +e100_negotiate(struct net_device* dev)
  2188. +{
  2189. + struct net_local *np = netdev_priv(dev);
  2190. + unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2191. + MII_ADVERTISE);
  2192. +
  2193. + /* Discard old speed and duplex settings */
  2194. + data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
  2195. + ADVERTISE_10HALF | ADVERTISE_10FULL);
  2196. +
  2197. + switch (current_speed_selection) {
  2198. + case 10:
  2199. + if (current_duplex == full)
  2200. + data |= ADVERTISE_10FULL;
  2201. + else if (current_duplex == half)
  2202. + data |= ADVERTISE_10HALF;
  2203. + else
  2204. + data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
  2205. + break;
  2206. +
  2207. + case 100:
  2208. + if (current_duplex == full)
  2209. + data |= ADVERTISE_100FULL;
  2210. + else if (current_duplex == half)
  2211. + data |= ADVERTISE_100HALF;
  2212. + else
  2213. + data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
  2214. + break;
  2215. +
  2216. + case 0: /* Auto */
  2217. + if (current_duplex == full)
  2218. + data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
  2219. + else if (current_duplex == half)
  2220. + data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
  2221. + else
  2222. + data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
  2223. + ADVERTISE_100HALF | ADVERTISE_100FULL;
  2224. + break;
  2225. +
  2226. + default: /* assume autoneg speed and duplex */
  2227. + data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
  2228. + ADVERTISE_100HALF | ADVERTISE_100FULL;
  2229. + break;
  2230. + }
  2231. +
  2232. + e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
  2233. +
  2234. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
  2235. + if (autoneg_normal) {
  2236. + /* Renegotiate with link partner */
  2237. + data |= BMCR_ANENABLE | BMCR_ANRESTART;
  2238. + } else {
  2239. + /* Don't negotiate speed or duplex */
  2240. + data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
  2241. +
  2242. + /* Set speed and duplex static */
  2243. + if (current_speed_selection == 10)
  2244. + data &= ~BMCR_SPEED100;
  2245. + else
  2246. + data |= BMCR_SPEED100;
  2247. +
  2248. + if (current_duplex != full)
  2249. + data &= ~BMCR_FULLDPLX;
  2250. + else
  2251. + data |= BMCR_FULLDPLX;
  2252. + }
  2253. + e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
  2254. +}
  2255. +
  2256. +static void
  2257. +e100_set_speed(struct net_device* dev, unsigned long speed)
  2258. +{
  2259. + struct net_local *np = netdev_priv(dev);
  2260. +
  2261. + spin_lock(&np->transceiver_lock);
  2262. + if (speed != current_speed_selection) {
  2263. + current_speed_selection = speed;
  2264. + e100_negotiate(dev);
  2265. + }
  2266. + spin_unlock(&np->transceiver_lock);
  2267. +}
  2268. +
  2269. +static void
  2270. +e100_check_duplex(unsigned long priv)
  2271. +{
  2272. + struct net_device *dev = (struct net_device *)priv;
  2273. + struct net_local *np = netdev_priv(dev);
  2274. + int old_duplex;
  2275. +
  2276. + spin_lock(&np->transceiver_lock);
  2277. + old_duplex = full_duplex;
  2278. + transceiver->check_duplex(dev);
  2279. + if (old_duplex != full_duplex) {
  2280. + /* Duplex changed */
  2281. + SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
  2282. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  2283. + }
  2284. +
  2285. + /* Reinitialize the timer. */
  2286. + duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
  2287. + add_timer(&duplex_timer);
  2288. + np->mii_if.full_duplex = full_duplex;
  2289. + spin_unlock(&np->transceiver_lock);
  2290. +}
  2291. +#if defined(CONFIG_ETRAX_NO_PHY)
  2292. +static void
  2293. +dummy_check_duplex(struct net_device* dev)
  2294. +{
  2295. + full_duplex = 1;
  2296. +}
  2297. +#else
  2298. +static void
  2299. +generic_check_duplex(struct net_device* dev)
  2300. +{
  2301. + unsigned long data;
  2302. + struct net_local *np = netdev_priv(dev);
  2303. +
  2304. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
  2305. + if ((data & ADVERTISE_10FULL) ||
  2306. + (data & ADVERTISE_100FULL))
  2307. + full_duplex = 1;
  2308. + else
  2309. + full_duplex = 0;
  2310. +}
  2311. +
  2312. +static void
  2313. +tdk_check_duplex(struct net_device* dev)
  2314. +{
  2315. + unsigned long data;
  2316. + struct net_local *np = netdev_priv(dev);
  2317. +
  2318. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2319. + MDIO_TDK_DIAGNOSTIC_REG);
  2320. + full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
  2321. +}
  2322. +
  2323. +static void
  2324. +broadcom_check_duplex(struct net_device* dev)
  2325. +{
  2326. + unsigned long data;
  2327. + struct net_local *np = netdev_priv(dev);
  2328. +
  2329. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2330. + MDIO_AUX_CTRL_STATUS_REG);
  2331. + full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
  2332. +}
  2333. +
  2334. +static void
  2335. +intel_check_duplex(struct net_device* dev)
  2336. +{
  2337. + unsigned long data;
  2338. + struct net_local *np = netdev_priv(dev);
  2339. +
  2340. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  2341. + MDIO_INT_STATUS_REG_2);
  2342. + full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
  2343. +}
  2344. +#endif
  2345. +static void
  2346. +e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
  2347. +{
  2348. + struct net_local *np = netdev_priv(dev);
  2349. +
  2350. + spin_lock(&np->transceiver_lock);
  2351. + if (new_duplex != current_duplex) {
  2352. + current_duplex = new_duplex;
  2353. + e100_negotiate(dev);
  2354. + }
  2355. + spin_unlock(&np->transceiver_lock);
  2356. +}
  2357. +
  2358. +static int
  2359. +e100_probe_transceiver(struct net_device* dev)
  2360. +{
  2361. + int ret = 0;
  2362. +
  2363. +#if !defined(CONFIG_ETRAX_NO_PHY)
  2364. + unsigned int phyid_high;
  2365. + unsigned int phyid_low;
  2366. + unsigned int oui;
  2367. + struct transceiver_ops* ops = NULL;
  2368. + struct net_local *np = netdev_priv(dev);
  2369. +
  2370. + spin_lock(&np->transceiver_lock);
  2371. +
  2372. + /* Probe MDIO physical address */
  2373. + for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
  2374. + np->mii_if.phy_id++) {
  2375. + if (e100_get_mdio_reg(dev,
  2376. + np->mii_if.phy_id, MII_BMSR) != 0xffff)
  2377. + break;
  2378. + }
  2379. + if (np->mii_if.phy_id == 32) {
  2380. + ret = -ENODEV;
  2381. + goto out;
  2382. + }
  2383. +
  2384. + /* Get manufacturer */
  2385. + phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
  2386. + phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
  2387. + oui = (phyid_high << 6) | (phyid_low >> 10);
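+ /* Per the IEEE PHY identifier layout, MII_PHYSID1 carries OUI bits
+ * 3..18 and the top six bits of MII_PHYSID2 carry OUI bits 19..24, so
+ * the expression above reassembles the vendor part that is matched
+ * against the oui fields in the transceivers[] table. */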
  2388. +
  2389. + for (ops = &transceivers[0]; ops->oui; ops++) {
  2390. + if (ops->oui == oui)
  2391. + break;
  2392. + }
  2393. + transceiver = ops;
  2394. +out:
  2395. + spin_unlock(&np->transceiver_lock);
  2396. +#endif
  2397. + return ret;
  2398. +}
  2399. +
  2400. +static int
  2401. +e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
  2402. +{
  2403. + unsigned short cmd; /* Data to be sent on MDIO port */
  2404. + int data; /* Data read from MDIO */
  2405. + int bitCounter;
  2406. +
  2407. + /* Start of frame, OP Code, Physical Address, Register Address */
  2408. + cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
  2409. + (location << 2);
  2410. +
  2411. + e100_send_mdio_cmd(cmd, 0);
  2412. +
  2413. + data = 0;
  2414. +
  2415. + /* Data... */
  2416. + for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
  2417. + data |= (e100_receive_mdio_bit() << bitCounter);
  2418. + }
  2419. +
  2420. + return data;
  2421. +}
  2422. +
  2423. +static void
  2424. +e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
  2425. +{
  2426. + int bitCounter;
  2427. + unsigned short cmd;
  2428. +
  2429. + cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
  2430. + (location << 2);
  2431. +
  2432. + e100_send_mdio_cmd(cmd, 1);
  2433. +
  2434. + /* Data... */
  2435. + for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
  2436. + e100_send_mdio_bit(GET_BIT(bitCounter, value));
  2437. + }
  2438. +
  2439. +}
  2440. +
  2441. +static void
  2442. +e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
  2443. +{
  2444. + int bitCounter;
  2445. + unsigned char data = 0x2;
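+ /* For a write command the two turnaround bits are driven as "10"
+ * (hence data = 0x2); for a read the bus is released instead and two
+ * bits are clocked in and discarded. */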
  2446. +
  2447. + /* Preamble */
  2448. + for (bitCounter = 31; bitCounter>= 0; bitCounter--)
  2449. + e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));
  2450. +
  2451. + for (bitCounter = 15; bitCounter >= 2; bitCounter--)
  2452. + e100_send_mdio_bit(GET_BIT(bitCounter, cmd));
  2453. +
  2454. + /* Turnaround */
  2455. + for (bitCounter = 1; bitCounter >= 0 ; bitCounter--)
  2456. + if (write_cmd)
  2457. + e100_send_mdio_bit(GET_BIT(bitCounter, data));
  2458. + else
  2459. + e100_receive_mdio_bit();
  2460. +}
  2461. +
  2462. +static void
  2463. +e100_send_mdio_bit(unsigned char bit)
  2464. +{
  2465. + *R_NETWORK_MGM_CTRL =
  2466. + IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
  2467. + IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
  2468. + udelay(1);
  2469. + *R_NETWORK_MGM_CTRL =
  2470. + IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
  2471. + IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
  2472. + IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
  2473. + udelay(1);
  2474. +}
  2475. +
  2476. +static unsigned char
2477. +e100_receive_mdio_bit(void)
  2478. +{
  2479. + unsigned char bit;
  2480. + *R_NETWORK_MGM_CTRL = 0;
  2481. + bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
  2482. + udelay(1);
  2483. + *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
  2484. + udelay(1);
  2485. + return bit;
  2486. +}
  2487. +
  2488. +static void
  2489. +e100_reset_transceiver(struct net_device* dev)
  2490. +{
  2491. + struct net_local *np = netdev_priv(dev);
  2492. + unsigned short cmd;
  2493. + unsigned short data;
  2494. + int bitCounter;
  2495. +
  2496. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
  2497. +
  2498. + cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
  2499. +
  2500. + e100_send_mdio_cmd(cmd, 1);
  2501. +
  2502. + data |= 0x8000;
  2503. +
  2504. + for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) {
  2505. + e100_send_mdio_bit(GET_BIT(bitCounter, data));
  2506. + }
  2507. +}
  2508. +
2509. +/* Called by the upper layers if they decide it took too long to complete
2510. + * sending a packet - we reset the hardware and drop the pending packets.
  2511. + */
  2512. +
  2513. +static void
  2514. +e100_tx_timeout(struct net_device *dev)
  2515. +{
  2516. + struct net_local *np = netdev_priv(dev);
  2517. + unsigned long flags;
  2518. +
  2519. + spin_lock_irqsave(&np->lock, flags);
  2520. +
  2521. + printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
  2522. + tx_done(dev) ? "IRQ problem" : "network cable problem");
  2523. +
  2524. + /* remember we got an error */
  2525. +
  2526. + dev->stats.tx_errors++;
  2527. +
  2528. + /* reset the TX DMA in case it has hung on something */
  2529. +
  2530. + RESET_DMA(NETWORK_TX_DMA_NBR);
  2531. + WAIT_DMA(NETWORK_TX_DMA_NBR);
  2532. +
  2533. + /* Reset the transceiver. */
  2534. +
  2535. + e100_reset_transceiver(dev);
  2536. +
  2537. + /* and get rid of the packets that never got an interrupt */
  2538. + while (myFirstTxDesc != myNextTxDesc) {
  2539. + dev_kfree_skb(myFirstTxDesc->skb);
  2540. + myFirstTxDesc->skb = 0;
  2541. + myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
  2542. + }
  2543. +
  2544. + /* Set up transmit DMA channel so it can be restarted later */
  2545. + *R_DMA_CH0_FIRST = 0;
  2546. + *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
  2547. +
  2548. + /* tell the upper layers we're ok again */
  2549. +
  2550. + netif_wake_queue(dev);
  2551. + spin_unlock_irqrestore(&np->lock, flags);
  2552. +}
  2553. +
  2554. +
  2555. +/* This will only be invoked if the driver is _not_ in XOFF state.
  2556. + * What this means is that we need not check it, and that this
  2557. + * invariant will hold if we make sure that the netif_*_queue()
  2558. + * calls are done at the proper times.
  2559. + */
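+/* That invariant is kept below by calling netif_stop_queue() when the
+ * descriptor ring becomes full, and in e100rxtx_interrupt() by calling
+ * netif_wake_queue() once transmitted descriptors have been reclaimed. */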
  2560. +
  2561. +static int
  2562. +e100_send_packet(struct sk_buff *skb, struct net_device *dev)
  2563. +{
  2564. + struct net_local *np = netdev_priv(dev);
  2565. + unsigned char *buf = skb->data;
  2566. + unsigned long flags;
  2567. +
  2568. +#ifdef ETHDEBUG
  2569. + printk("send packet len %d\n", length);
  2570. +#endif
  2571. + spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */
  2572. +
  2573. + myNextTxDesc->skb = skb;
  2574. +
  2575. + dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
  2576. +
  2577. + e100_hardware_send_packet(np, buf, skb->len);
  2578. +
  2579. + myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
  2580. +
  2581. + /* Stop queue if full */
  2582. + if (myNextTxDesc == myFirstTxDesc) {
  2583. + netif_stop_queue(dev);
  2584. + }
  2585. +
  2586. + spin_unlock_irqrestore(&np->lock, flags);
  2587. +
  2588. + return NETDEV_TX_OK;
  2589. +}
  2590. +
  2591. +/*
  2592. + * The typical workload of the driver:
  2593. + * Handle the network interface interrupts.
  2594. + */
  2595. +
  2596. +static irqreturn_t
  2597. +e100rxtx_interrupt(int irq, void *dev_id)
  2598. +{
  2599. + struct net_device *dev = (struct net_device *)dev_id;
  2600. + struct net_local *np = netdev_priv(dev);
  2601. + unsigned long irqbits;
  2602. +
  2603. + /*
  2604. + * Note that both rx and tx interrupts are blocked at this point,
  2605. + * regardless of which got us here.
  2606. + */
  2607. +
  2608. + irqbits = *R_IRQ_MASK2_RD;
  2609. +
  2610. + /* Handle received packets */
  2611. + if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
  2612. + /* acknowledge the eop interrupt */
  2613. +
  2614. + *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
  2615. +
  2616. + /* check if one or more complete packets were indeed received */
  2617. +
  2618. + while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
  2619. + (myNextRxDesc != myLastRxDesc)) {
  2620. + /* Take out the buffer and give it to the OS, then
  2621. + * allocate a new buffer to put a packet in.
  2622. + */
  2623. + e100_rx(dev);
  2624. + dev->stats.rx_packets++;
  2625. + /* restart/continue on the channel, for safety */
  2626. + *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
  2627. + /* clear dma channel 1 eop/descr irq bits */
  2628. + *R_DMA_CH1_CLR_INTR =
  2629. + IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
  2630. + IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);
  2631. +
2632. + /* another packet might have arrived in the meantime,
2633. + so loop back and check for it */
  2634. + }
  2635. + }
  2636. +
  2637. + /* Report any packets that have been sent */
  2638. + while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
  2639. + (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
  2640. + dev->stats.tx_bytes += myFirstTxDesc->skb->len;
  2641. + dev->stats.tx_packets++;
  2642. +
2643. + /* the DMA has finished transmitting the data in this skb, so now
2644. + we can release the skb memory */
  2645. + dev_kfree_skb_irq(myFirstTxDesc->skb);
  2646. + myFirstTxDesc->skb = 0;
  2647. + myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
  2648. + /* Wake up queue. */
  2649. + netif_wake_queue(dev);
  2650. + }
  2651. +
  2652. + if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
  2653. + /* acknowledge the eop interrupt. */
  2654. + *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
  2655. + }
  2656. +
  2657. + return IRQ_HANDLED;
  2658. +}
  2659. +
  2660. +static irqreturn_t
  2661. +e100nw_interrupt(int irq, void *dev_id)
  2662. +{
  2663. + struct net_device *dev = (struct net_device *)dev_id;
  2664. + unsigned long irqbits = *R_IRQ_MASK0_RD;
  2665. +
  2666. + /* check for underrun irq */
  2667. + if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
  2668. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
  2669. + *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
  2670. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
  2671. + dev->stats.tx_errors++;
  2672. + D(printk("ethernet receiver underrun!\n"));
  2673. + }
  2674. +
  2675. + /* check for overrun irq */
  2676. + if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
  2677. + update_rx_stats(&dev->stats); /* this will ack the irq */
  2678. + D(printk("ethernet receiver overrun!\n"));
  2679. + }
  2680. + /* check for excessive collision irq */
  2681. + if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
  2682. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
  2683. + *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
  2684. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
  2685. + dev->stats.tx_errors++;
  2686. + D(printk("ethernet excessive collisions!\n"));
  2687. + }
  2688. + return IRQ_HANDLED;
  2689. +}
  2690. +
  2691. +/* We have a good packet(s), get it/them out of the buffers. */
  2692. +static void
  2693. +e100_rx(struct net_device *dev)
  2694. +{
  2695. + struct sk_buff *skb;
  2696. + int length = 0;
  2697. + struct net_local *np = netdev_priv(dev);
  2698. + unsigned char *skb_data_ptr;
  2699. +#ifdef ETHDEBUG
  2700. + int i;
  2701. +#endif
  2702. + etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
  2703. + spin_lock(&np->led_lock);
  2704. + if (!led_active && time_after(jiffies, led_next_time)) {
  2705. + /* light the network leds depending on the current speed. */
  2706. + e100_set_network_leds(NETWORK_ACTIVITY);
  2707. +
  2708. + /* Set the earliest time we may clear the LED */
  2709. + led_next_time = jiffies + NET_FLASH_TIME;
  2710. + led_active = 1;
  2711. + mod_timer(&clear_led_timer, jiffies + HZ/10);
  2712. + }
  2713. + spin_unlock(&np->led_lock);
  2714. +
  2715. + length = myNextRxDesc->descr.hw_len - 4;
  2716. + dev->stats.rx_bytes += length;
  2717. +
  2718. +#ifdef ETHDEBUG
  2719. + printk("Got a packet of length %d:\n", length);
  2720. + /* dump the first bytes in the packet */
  2721. + skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
  2722. + for (i = 0; i < 8; i++) {
  2723. + printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
  2724. + skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
  2725. + skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
  2726. + skb_data_ptr += 8;
  2727. + }
  2728. +#endif
  2729. +
  2730. + if (length < RX_COPYBREAK) {
  2731. + /* Small packet, copy data */
  2732. + skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
  2733. + if (!skb) {
  2734. + dev->stats.rx_errors++;
  2735. + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
  2736. + goto update_nextrxdesc;
  2737. + }
  2738. +
  2739. + skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
  2740. + skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */
  2741. +
  2742. +#ifdef ETHDEBUG
  2743. + printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
  2744. + skb->head, skb->data, skb_tail_pointer(skb),
  2745. + skb_end_pointer(skb));
  2746. + printk("copying packet to 0x%x.\n", skb_data_ptr);
  2747. +#endif
  2748. +
  2749. + memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
  2750. + }
  2751. + else {
  2752. + /* Large packet, send directly to upper layers and allocate new
  2753. + * memory (aligned to cache line boundary to avoid bug).
  2754. + * Before sending the skb to upper layers we must make sure
  2755. + * that skb->data points to the aligned start of the packet.
  2756. + */
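+ /* The DMA buffer was placed at a cache-line aligned address inside
+ * the skb data area (see the L1_CACHE_ALIGN() in the descriptor
+ * setup), so skb->data may point a few bytes before the packet; the
+ * skb_put()/skb_pull() pair below skips those alignment bytes. */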
  2757. + int align;
  2758. + struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
  2759. + if (!new_skb) {
  2760. + dev->stats.rx_errors++;
  2761. + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
  2762. + goto update_nextrxdesc;
  2763. + }
  2764. + skb = myNextRxDesc->skb;
  2765. + align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
  2766. + skb_put(skb, length + align);
  2767. + skb_pull(skb, align); /* Remove alignment bytes */
  2768. + myNextRxDesc->skb = new_skb;
  2769. + myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
  2770. + }
  2771. +
  2772. + skb->protocol = eth_type_trans(skb, dev);
  2773. +
  2774. + /* Send the packet to the upper layers */
  2775. + netif_rx(skb);
  2776. +
  2777. + update_nextrxdesc:
  2778. + /* Prepare for next packet */
  2779. + myNextRxDesc->descr.status = 0;
  2780. + prevRxDesc = myNextRxDesc;
  2781. + myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
  2782. +
  2783. + rx_queue_len++;
  2784. +
  2785. + /* Check if descriptors should be returned */
  2786. + if (rx_queue_len == RX_QUEUE_THRESHOLD) {
  2787. + flush_etrax_cache();
  2788. + prevRxDesc->descr.ctrl |= d_eol;
  2789. + myLastRxDesc->descr.ctrl &= ~d_eol;
  2790. + myLastRxDesc = prevRxDesc;
  2791. + rx_queue_len = 0;
  2792. + }
  2793. +}
  2794. +
  2795. +/* The inverse routine to net_open(). */
  2796. +static int
  2797. +e100_close(struct net_device *dev)
  2798. +{
  2799. + printk(KERN_INFO "Closing %s.\n", dev->name);
  2800. +
  2801. + netif_stop_queue(dev);
  2802. +
  2803. + *R_IRQ_MASK0_CLR =
  2804. + IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
  2805. + IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
  2806. + IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
  2807. +
  2808. + *R_IRQ_MASK2_CLR =
  2809. + IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
  2810. + IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
  2811. + IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
  2812. + IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
  2813. +
  2814. + /* Stop the receiver and the transmitter */
  2815. +
  2816. + RESET_DMA(NETWORK_TX_DMA_NBR);
  2817. + RESET_DMA(NETWORK_RX_DMA_NBR);
  2818. +
  2819. + /* Flush the Tx and disable Rx here. */
  2820. +
  2821. + free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
  2822. + free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
  2823. + free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
  2824. +
  2825. + cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
  2826. + cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
  2827. +
  2828. + /* Update the statistics here. */
  2829. +
  2830. + update_rx_stats(&dev->stats);
  2831. + update_tx_stats(&dev->stats);
  2832. +
  2833. + /* Stop speed/duplex timers */
  2834. + del_timer(&speed_timer);
  2835. + del_timer(&duplex_timer);
  2836. +
  2837. + return 0;
  2838. +}
  2839. +
  2840. +static int
  2841. +e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  2842. +{
  2843. + struct mii_ioctl_data *data = if_mii(ifr);
  2844. + struct net_local *np = netdev_priv(dev);
  2845. + int rc = 0;
  2846. + int old_autoneg;
  2847. +
  2848. + spin_lock(&np->lock); /* Preempt protection */
  2849. + switch (cmd) {
  2850. + /* The ioctls below should be considered obsolete but are */
  2851. + /* still present for compatibility with old scripts/apps */
  2852. + case SET_ETH_SPEED_10: /* 10 Mbps */
  2853. + e100_set_speed(dev, 10);
  2854. + break;
  2855. + case SET_ETH_SPEED_100: /* 100 Mbps */
  2856. + e100_set_speed(dev, 100);
  2857. + break;
  2858. + case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
  2859. + e100_set_speed(dev, 0);
  2860. + break;
  2861. + case SET_ETH_DUPLEX_HALF: /* Half duplex */
  2862. + e100_set_duplex(dev, half);
  2863. + break;
  2864. + case SET_ETH_DUPLEX_FULL: /* Full duplex */
  2865. + e100_set_duplex(dev, full);
  2866. + break;
  2867. + case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
  2868. + e100_set_duplex(dev, autoneg);
  2869. + break;
  2870. + case SET_ETH_AUTONEG:
  2871. + old_autoneg = autoneg_normal;
  2872. + autoneg_normal = *(int*)data;
  2873. + if (autoneg_normal != old_autoneg)
  2874. + e100_negotiate(dev);
  2875. + break;
  2876. + default:
  2877. + rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
  2878. + cmd, NULL);
  2879. + break;
  2880. + }
  2881. + spin_unlock(&np->lock);
  2882. + return rc;
  2883. +}
  2884. +
  2885. +static int e100_get_settings(struct net_device *dev,
  2886. + struct ethtool_cmd *cmd)
  2887. +{
  2888. + struct net_local *np = netdev_priv(dev);
  2889. + int err;
  2890. +
  2891. + spin_lock_irq(&np->lock);
  2892. + err = mii_ethtool_gset(&np->mii_if, cmd);
  2893. + spin_unlock_irq(&np->lock);
  2894. +
  2895. + /* The PHY may support 1000baseT, but the Etrax100 does not. */
  2896. + cmd->supported &= ~(SUPPORTED_1000baseT_Half
  2897. + | SUPPORTED_1000baseT_Full);
  2898. + return err;
  2899. +}
  2900. +
  2901. +static int e100_set_settings(struct net_device *dev,
  2902. + struct ethtool_cmd *ecmd)
  2903. +{
  2904. + if (ecmd->autoneg == AUTONEG_ENABLE) {
  2905. + e100_set_duplex(dev, autoneg);
  2906. + e100_set_speed(dev, 0);
  2907. + } else {
  2908. + e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
  2909. + e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
  2910. + }
  2911. +
  2912. + return 0;
  2913. +}
  2914. +
  2915. +static void e100_get_drvinfo(struct net_device *dev,
  2916. + struct ethtool_drvinfo *info)
  2917. +{
  2918. + strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1);
  2919. + strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1);
  2920. + strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
  2921. + strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
  2922. +}
  2923. +
  2924. +static int e100_nway_reset(struct net_device *dev)
  2925. +{
  2926. + if (current_duplex == autoneg && current_speed_selection == 0)
  2927. + e100_negotiate(dev);
  2928. + return 0;
  2929. +}
  2930. +
  2931. +static const struct ethtool_ops e100_ethtool_ops = {
  2932. + .get_settings = e100_get_settings,
  2933. + .set_settings = e100_set_settings,
  2934. + .get_drvinfo = e100_get_drvinfo,
  2935. + .nway_reset = e100_nway_reset,
  2936. + .get_link = ethtool_op_get_link,
  2937. +};
  2938. +
  2939. +static int
  2940. +e100_set_config(struct net_device *dev, struct ifmap *map)
  2941. +{
  2942. + struct net_local *np = netdev_priv(dev);
  2943. +
  2944. + spin_lock(&np->lock); /* Preempt protection */
  2945. +
  2946. + switch(map->port) {
  2947. + case IF_PORT_UNKNOWN:
  2948. + /* Use autoneg */
  2949. + e100_set_speed(dev, 0);
  2950. + e100_set_duplex(dev, autoneg);
  2951. + break;
  2952. + case IF_PORT_10BASET:
  2953. + e100_set_speed(dev, 10);
  2954. + e100_set_duplex(dev, autoneg);
  2955. + break;
  2956. + case IF_PORT_100BASET:
  2957. + case IF_PORT_100BASETX:
  2958. + e100_set_speed(dev, 100);
  2959. + e100_set_duplex(dev, autoneg);
  2960. + break;
  2961. + case IF_PORT_100BASEFX:
  2962. + case IF_PORT_10BASE2:
  2963. + case IF_PORT_AUI:
  2964. + spin_unlock(&np->lock);
  2965. + return -EOPNOTSUPP;
  2967. + default:
  2968. + printk(KERN_ERR "%s: Invalid media selected", dev->name);
  2969. + spin_unlock(&np->lock);
  2970. + return -EINVAL;
  2971. + }
  2972. + spin_unlock(&np->lock);
  2973. + return 0;
  2974. +}
  2975. +
  2976. +static void
  2977. +update_rx_stats(struct net_device_stats *es)
  2978. +{
  2979. + unsigned long r = *R_REC_COUNTERS;
  2980. + /* update stats relevant to reception errors */
  2981. + es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
  2982. + es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
  2983. + es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
  2984. + es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
  2985. +}
  2986. +
  2987. +static void
  2988. +update_tx_stats(struct net_device_stats *es)
  2989. +{
  2990. + unsigned long r = *R_TR_COUNTERS;
  2991. + /* update stats relevant to transmission errors */
  2992. + es->collisions +=
  2993. + IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
  2994. + IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
  2995. +}
  2996. +
  2997. +/*
  2998. + * Get the current statistics.
  2999. + * This may be called with the card open or closed.
  3000. + */
  3001. +static struct net_device_stats *
  3002. +e100_get_stats(struct net_device *dev)
  3003. +{
  3004. + struct net_local *lp = netdev_priv(dev);
  3005. + unsigned long flags;
  3006. +
  3007. + spin_lock_irqsave(&lp->lock, flags);
  3008. +
  3009. + update_rx_stats(&dev->stats);
  3010. + update_tx_stats(&dev->stats);
  3011. +
  3012. + spin_unlock_irqrestore(&lp->lock, flags);
  3013. + return &dev->stats;
  3014. +}
  3015. +
  3016. +/*
  3017. + * Set or clear the multicast filter for this adaptor.
  3018. + * num_addrs == -1 Promiscuous mode, receive all packets
  3019. + * num_addrs == 0 Normal mode, clear multicast list
  3020. + * num_addrs > 0 Multicast mode, receive normal and MC packets,
  3021. + * and do best-effort filtering.
  3022. + */
  3023. +static void
  3024. +set_multicast_list(struct net_device *dev)
  3025. +{
  3026. + struct net_local *lp = netdev_priv(dev);
  3027. + int num_addr = netdev_mc_count(dev);
  3028. + unsigned long int lo_bits;
  3029. + unsigned long int hi_bits;
  3030. +
  3031. + spin_lock(&lp->lock);
  3032. + if (dev->flags & IFF_PROMISC) {
  3033. + /* promiscuous mode */
  3034. + lo_bits = 0xfffffffful;
  3035. + hi_bits = 0xfffffffful;
  3036. +
  3037. + /* Enable individual receive */
  3038. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
  3039. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  3040. + } else if (dev->flags & IFF_ALLMULTI) {
  3041. + /* enable all multicasts */
  3042. + lo_bits = 0xfffffffful;
  3043. + hi_bits = 0xfffffffful;
  3044. +
  3045. + /* Disable individual receive */
  3046. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
  3047. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  3048. + } else if (num_addr == 0) {
  3049. + /* Normal, clear the mc list */
  3050. + lo_bits = 0x00000000ul;
  3051. + hi_bits = 0x00000000ul;
  3052. +
  3053. + /* Disable individual receive */
  3054. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
  3055. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  3056. + } else {
  3057. + /* MC mode, receive normal and MC packets */
  3058. + char hash_ix;
  3059. + struct netdev_hw_addr *ha;
  3060. + char *baddr;
  3061. +
  3062. + lo_bits = 0x00000000ul;
  3063. + hi_bits = 0x00000000ul;
  3064. + netdev_for_each_mc_addr(ha, dev) {
  3065. + /* Calculate the hash index for the GA registers */
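+ /* The 6-bit index is the XOR of the eight consecutive 6-bit groups
+ * of the 48-bit multicast address; it selects one bit in the 64-bit
+ * group address filter formed by R_NETWORK_GA_0/R_NETWORK_GA_1. */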
  3066. +
  3067. + hash_ix = 0;
  3068. + baddr = ha->addr;
  3069. + hash_ix ^= (*baddr) & 0x3f;
  3070. + hash_ix ^= ((*baddr) >> 6) & 0x03;
  3071. + ++baddr;
  3072. + hash_ix ^= ((*baddr) << 2) & 0x03c;
  3073. + hash_ix ^= ((*baddr) >> 4) & 0xf;
  3074. + ++baddr;
  3075. + hash_ix ^= ((*baddr) << 4) & 0x30;
  3076. + hash_ix ^= ((*baddr) >> 2) & 0x3f;
  3077. + ++baddr;
  3078. + hash_ix ^= (*baddr) & 0x3f;
  3079. + hash_ix ^= ((*baddr) >> 6) & 0x03;
  3080. + ++baddr;
  3081. + hash_ix ^= ((*baddr) << 2) & 0x03c;
  3082. + hash_ix ^= ((*baddr) >> 4) & 0xf;
  3083. + ++baddr;
  3084. + hash_ix ^= ((*baddr) << 4) & 0x30;
  3085. + hash_ix ^= ((*baddr) >> 2) & 0x3f;
  3086. +
  3087. + hash_ix &= 0x3f;
  3088. +
  3089. + if (hash_ix >= 32) {
  3090. + hi_bits |= (1 << (hash_ix-32));
  3091. + } else {
  3092. + lo_bits |= (1 << hash_ix);
  3093. + }
  3094. + }
  3095. + /* Disable individual receive */
  3096. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
  3097. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  3098. + }
  3099. + *R_NETWORK_GA_0 = lo_bits;
  3100. + *R_NETWORK_GA_1 = hi_bits;
  3101. + spin_unlock(&lp->lock);
  3102. +}
  3103. +
  3104. +void
  3105. +e100_hardware_send_packet(struct net_local *np, char *buf, int length)
  3106. +{
  3107. + D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
  3108. +
  3109. + spin_lock(&np->led_lock);
  3110. + if (!led_active && time_after(jiffies, led_next_time)) {
  3111. + /* light the network leds depending on the current speed. */
  3112. + e100_set_network_leds(NETWORK_ACTIVITY);
  3113. +
  3114. + /* Set the earliest time we may clear the LED */
  3115. + led_next_time = jiffies + NET_FLASH_TIME;
  3116. + led_active = 1;
  3117. + mod_timer(&clear_led_timer, jiffies + HZ/10);
  3118. + }
  3119. + spin_unlock(&np->led_lock);
  3120. +
  3121. + /* configure the tx dma descriptor */
  3122. + myNextTxDesc->descr.sw_len = length;
  3123. + myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
  3124. + myNextTxDesc->descr.buf = virt_to_phys(buf);
  3125. +
  3126. + /* Move end of list */
  3127. + myLastTxDesc->descr.ctrl &= ~d_eol;
  3128. + myLastTxDesc = myNextTxDesc;
  3129. +
  3130. + /* Restart DMA channel */
  3131. + *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
  3132. +}
  3133. +
  3134. +static void
  3135. +e100_clear_network_leds(unsigned long dummy)
  3136. +{
  3137. + struct net_device *dev = (struct net_device *)dummy;
  3138. + struct net_local *np = netdev_priv(dev);
  3139. +
  3140. + spin_lock(&np->led_lock);
  3141. +
  3142. + if (led_active && time_after(jiffies, led_next_time)) {
  3143. + e100_set_network_leds(NO_NETWORK_ACTIVITY);
  3144. +
  3145. + /* Set the earliest time we may set the LED */
  3146. + led_next_time = jiffies + NET_FLASH_PAUSE;
  3147. + led_active = 0;
  3148. + }
  3149. +
  3150. + spin_unlock(&np->led_lock);
  3151. +}
  3152. +
  3153. +static void
  3154. +e100_set_network_leds(int active)
  3155. +{
  3156. +#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
  3157. + int light_leds = (active == NO_NETWORK_ACTIVITY);
  3158. +#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
  3159. + int light_leds = (active == NETWORK_ACTIVITY);
  3160. +#else
  3161. +#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
  3162. +#endif
  3163. +
  3164. + if (!current_speed) {
3165. + /* Link is down, turn the LED off */
  3166. + CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
  3167. + } else if (light_leds) {
  3168. + if (current_speed == 10) {
  3169. + CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
  3170. + } else {
  3171. + CRIS_LED_NETWORK_SET(CRIS_LED_GREEN);
  3172. + }
  3173. + } else {
  3174. + CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
  3175. + }
  3176. +}
  3177. +
  3178. +#ifdef CONFIG_NET_POLL_CONTROLLER
  3179. +static void
  3180. +e100_netpoll(struct net_device* netdev)
  3181. +{
3182. + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
  3183. +}
  3184. +#endif
  3185. +
  3186. +static int
  3187. +etrax_init_module(void)
  3188. +{
  3189. + return etrax_ethernet_init();
  3190. +}
  3191. +
  3192. +static int __init
  3193. +e100_boot_setup(char* str)
  3194. +{
  3195. + struct sockaddr sa = {0};
  3196. + int i;
  3197. +
  3198. + /* Parse the colon separated Ethernet station address */
  3199. + for (i = 0; i < ETH_ALEN; i++) {
  3200. + unsigned int tmp;
  3201. + if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
  3202. + printk(KERN_WARNING "Malformed station address");
  3203. + return 0;
  3204. + }
  3205. + sa.sa_data[i] = (char)tmp;
  3206. + }
  3207. +
  3208. + default_mac = sa;
  3209. + return 1;
  3210. +}
  3211. +
  3212. +__setup("etrax100_eth=", e100_boot_setup);
  3213. +
  3214. +module_init(etrax_init_module);
  3215. diff -Nur linux-2.6.39.orig/drivers/tty/serial/crisv10.c linux-2.6.39/drivers/tty/serial/crisv10.c
  3216. --- linux-2.6.39.orig/drivers/tty/serial/crisv10.c 2011-05-19 06:06:34.000000000 +0200
  3217. +++ linux-2.6.39/drivers/tty/serial/crisv10.c 2011-07-28 16:27:57.623883501 +0200
  3218. @@ -26,6 +26,7 @@
  3219. #include <linux/kernel.h>
  3220. #include <linux/mutex.h>
  3221. #include <linux/bitops.h>
  3222. +#include <linux/device.h>
  3223. #include <linux/seq_file.h>
  3224. #include <linux/delay.h>
  3225. #include <linux/module.h>
  3226. @@ -4430,6 +4431,7 @@
  3227. #endif
  3228. };
  3229. +static struct class *rs_class;
  3230. static int __init rs_init(void)
  3231. {
  3232. int i;
  3233. @@ -4564,6 +4566,24 @@
  3234. #endif
  3235. #endif /* CONFIG_SVINTO_SIM */
  3236. + rs_class = class_create(THIS_MODULE, "rs_tty");
  3237. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  3238. + device_create(rs_class, NULL,
  3239. + MKDEV(TTY_MAJOR, 64), NULL, "ttyS0");
  3240. +#endif
  3241. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  3242. + device_create(rs_class, NULL,
  3243. + MKDEV(TTY_MAJOR, 65), NULL, "ttyS1");
  3244. +#endif
  3245. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  3246. + device_create(rs_class, NULL,
  3247. + MKDEV(TTY_MAJOR, 66), NULL, "ttyS2");
  3248. +#endif
  3249. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  3250. + device_create(rs_class, NULL,
  3251. + MKDEV(TTY_MAJOR, 67), NULL, "ttyS3");
  3252. +#endif
  3253. +
  3254. return 0;
  3255. }
  3256. diff -Nur linux-2.6.39.orig/drivers/tty/serial/crisv10.c.orig linux-2.6.39/drivers/tty/serial/crisv10.c.orig
  3257. --- linux-2.6.39.orig/drivers/tty/serial/crisv10.c.orig 1970-01-01 01:00:00.000000000 +0100
  3258. +++ linux-2.6.39/drivers/tty/serial/crisv10.c.orig 2011-05-19 06:06:34.000000000 +0200
  3259. @@ -0,0 +1,4572 @@
  3260. +/*
  3261. + * Serial port driver for the ETRAX 100LX chip
  3262. + *
  3263. + * Copyright (C) 1998-2007 Axis Communications AB
  3264. + *
  3265. + * Many, many authors. Based once upon a time on serial.c for 16x50.
  3266. + *
  3267. + */
  3268. +
  3269. +static char *serial_version = "$Revision: 1.25 $";
  3270. +
  3271. +#include <linux/types.h>
  3272. +#include <linux/errno.h>
  3273. +#include <linux/signal.h>
  3274. +#include <linux/sched.h>
  3275. +#include <linux/timer.h>
  3276. +#include <linux/interrupt.h>
  3277. +#include <linux/tty.h>
  3278. +#include <linux/tty_flip.h>
  3279. +#include <linux/major.h>
  3280. +#include <linux/string.h>
  3281. +#include <linux/fcntl.h>
  3282. +#include <linux/mm.h>
  3283. +#include <linux/slab.h>
  3284. +#include <linux/init.h>
  3285. +#include <linux/kernel.h>
  3286. +#include <linux/mutex.h>
  3287. +#include <linux/bitops.h>
  3288. +#include <linux/seq_file.h>
  3289. +#include <linux/delay.h>
  3290. +#include <linux/module.h>
  3291. +#include <linux/uaccess.h>
  3292. +#include <linux/io.h>
  3293. +
  3294. +#include <asm/irq.h>
  3295. +#include <asm/dma.h>
  3296. +#include <asm/system.h>
  3297. +
  3298. +#include <arch/svinto.h>
  3299. +
  3300. +/* non-arch dependent serial structures are in linux/serial.h */
  3301. +#include <linux/serial.h>
  3302. +/* while we keep our own stuff (struct e100_serial) in a local .h file */
  3303. +#include "crisv10.h"
  3304. +#include <asm/fasttimer.h>
  3305. +#include <arch/io_interface_mux.h>
  3306. +
  3307. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  3308. +#ifndef CONFIG_ETRAX_FAST_TIMER
  3309. +#error "Enable FAST_TIMER to use SERIAL_FAST_TIMER"
  3310. +#endif
  3311. +#endif
  3312. +
  3313. +#if defined(CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS) && \
  3314. + (CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS == 0)
  3315. +#error "RX_TIMEOUT_TICKS == 0 not allowed, use 1"
  3316. +#endif
  3317. +
  3318. +#if defined(CONFIG_ETRAX_RS485_ON_PA) && defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  3319. +#error "Disable either CONFIG_ETRAX_RS485_ON_PA or CONFIG_ETRAX_RS485_ON_PORT_G"
  3320. +#endif
  3321. +
  3322. +/*
3323. + * All of the compatibility code so we can compile serial.c against
  3324. + * older kernels is hidden in serial_compat.h
  3325. + */
  3326. +#if defined(LOCAL_HEADERS)
  3327. +#include "serial_compat.h"
  3328. +#endif
  3329. +
  3330. +struct tty_driver *serial_driver;
  3331. +
  3332. +/* number of characters left in xmit buffer before we ask for more */
  3333. +#define WAKEUP_CHARS 256
  3334. +
  3335. +//#define SERIAL_DEBUG_INTR
  3336. +//#define SERIAL_DEBUG_OPEN
  3337. +//#define SERIAL_DEBUG_FLOW
  3338. +//#define SERIAL_DEBUG_DATA
  3339. +//#define SERIAL_DEBUG_THROTTLE
  3340. +//#define SERIAL_DEBUG_IO /* Debug for Extra control and status pins */
  3341. +//#define SERIAL_DEBUG_LINE 0 /* What serport we want to debug */
  3342. +
3343. +/* Enable this to use serial interrupts to handle the case where the
3344. + first received event on the serial port is expected to be an error,
3345. + break or similar. Used to make it possible to flash IRMA
3346. + from eLinux */
  3347. +#define SERIAL_HANDLE_EARLY_ERRORS
  3348. +
  3349. +/* Currently 16 descriptors x 128 bytes = 2048 bytes */
  3350. +#define SERIAL_DESCR_BUF_SIZE 256
  3351. +
  3352. +#define SERIAL_PRESCALE_BASE 3125000 /* 3.125MHz */
  3353. +#define DEF_BAUD_BASE SERIAL_PRESCALE_BASE
  3354. +
3355. +/* We don't want to load the system with massive fast timer interrupts
3356. + * at high baudrates, so limit it to 250 us (4 kHz) */
  3357. +#define MIN_FLUSH_TIME_USEC 250
  3358. +
  3359. +/* Add an x here to log a lot of timer stuff */
  3360. +#define TIMERD(x)
  3361. +/* Debug details of interrupt handling */
  3362. +#define DINTR1(x) /* irq on/off, errors */
  3363. +#define DINTR2(x) /* tx and rx */
  3364. +/* Debug flip buffer stuff */
  3365. +#define DFLIP(x)
  3366. +/* Debug flow control and overview of data flow */
  3367. +#define DFLOW(x)
  3368. +#define DBAUD(x)
  3369. +#define DLOG_INT_TRIG(x)
  3370. +
  3371. +//#define DEBUG_LOG_INCLUDED
  3372. +#ifndef DEBUG_LOG_INCLUDED
  3373. +#define DEBUG_LOG(line, string, value)
  3374. +#else
  3375. +struct debug_log_info
  3376. +{
  3377. + unsigned long time;
  3378. + unsigned long timer_data;
  3379. +// int line;
  3380. + const char *string;
  3381. + int value;
  3382. +};
  3383. +#define DEBUG_LOG_SIZE 4096
  3384. +
  3385. +struct debug_log_info debug_log[DEBUG_LOG_SIZE];
  3386. +int debug_log_pos = 0;
  3387. +
  3388. +#define DEBUG_LOG(_line, _string, _value) do { \
  3389. + if ((_line) == SERIAL_DEBUG_LINE) {\
  3390. + debug_log_func(_line, _string, _value); \
  3391. + }\
  3392. +}while(0)
  3393. +
  3394. +void debug_log_func(int line, const char *string, int value)
  3395. +{
  3396. + if (debug_log_pos < DEBUG_LOG_SIZE) {
  3397. + debug_log[debug_log_pos].time = jiffies;
  3398. + debug_log[debug_log_pos].timer_data = *R_TIMER_DATA;
  3399. +// debug_log[debug_log_pos].line = line;
  3400. + debug_log[debug_log_pos].string = string;
  3401. + debug_log[debug_log_pos].value = value;
  3402. + debug_log_pos++;
  3403. + }
  3404. + /*printk(string, value);*/
  3405. +}
  3406. +#endif
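The debug log above is a bounded array that simply stops recording once DEBUG_LOG_SIZE entries have been stored. A stripped-down userspace version of the same idea (without the jiffies/R_TIMER_DATA timestamps) looks roughly like this:

#include <stdio.h>

#define DEBUG_LOG_SIZE 4

struct debug_log_info {
	const char *string;
	int value;
};

static struct debug_log_info debug_log[DEBUG_LOG_SIZE];
static int debug_log_pos;

/* Record an entry until the buffer is full, then silently drop. */
static void debug_log_func(const char *string, int value)
{
	if (debug_log_pos < DEBUG_LOG_SIZE) {
		debug_log[debug_log_pos].string = string;
		debug_log[debug_log_pos].value = value;
		debug_log_pos++;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		debug_log_func("event %i\n", i);

	for (i = 0; i < debug_log_pos; i++)
		printf(debug_log[i].string, debug_log[i].value);
	return 0;  /* only the first DEBUG_LOG_SIZE events were kept */
}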
  3407. +
  3408. +#ifndef CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS
  3409. +/* Default number of timer ticks before flushing rx fifo
3410. + * For "little data, low latency" applications: use the minimum allowed (1, see above)
3411. + * For "much data" applications (e.g. PPP): use ~5
  3412. + */
  3413. +#define CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS 5
  3414. +#endif
  3415. +
  3416. +unsigned long timer_data_to_ns(unsigned long timer_data);
  3417. +
  3418. +static void change_speed(struct e100_serial *info);
  3419. +static void rs_throttle(struct tty_struct * tty);
  3420. +static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
  3421. +static int rs_write(struct tty_struct *tty,
  3422. + const unsigned char *buf, int count);
  3423. +#ifdef CONFIG_ETRAX_RS485
  3424. +static int e100_write_rs485(struct tty_struct *tty,
  3425. + const unsigned char *buf, int count);
  3426. +#endif
  3427. +static int get_lsr_info(struct e100_serial *info, unsigned int *value);
  3428. +
  3429. +
  3430. +#define DEF_BAUD 115200 /* 115.2 kbit/s */
  3431. +#define STD_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
  3432. +#define DEF_RX 0x20 /* or SERIAL_CTRL_W >> 8 */
  3433. +/* Default value of tx_ctrl register: has txd(bit 7)=1 (idle) as default */
  3434. +#define DEF_TX 0x80 /* or SERIAL_CTRL_B */
  3435. +
  3436. +/* offsets from R_SERIALx_CTRL */
  3437. +
  3438. +#define REG_DATA 0
  3439. +#define REG_DATA_STATUS32 0 /* this is the 32 bit register R_SERIALx_READ */
  3440. +#define REG_TR_DATA 0
  3441. +#define REG_STATUS 1
  3442. +#define REG_TR_CTRL 1
  3443. +#define REG_REC_CTRL 2
  3444. +#define REG_BAUD 3
  3445. +#define REG_XOFF 4 /* this is a 32 bit register */
  3446. +
  3447. +/* The bitfields are the same for all serial ports */
  3448. +#define SER_RXD_MASK IO_MASK(R_SERIAL0_STATUS, rxd)
  3449. +#define SER_DATA_AVAIL_MASK IO_MASK(R_SERIAL0_STATUS, data_avail)
  3450. +#define SER_FRAMING_ERR_MASK IO_MASK(R_SERIAL0_STATUS, framing_err)
  3451. +#define SER_PAR_ERR_MASK IO_MASK(R_SERIAL0_STATUS, par_err)
  3452. +#define SER_OVERRUN_MASK IO_MASK(R_SERIAL0_STATUS, overrun)
  3453. +
  3454. +#define SER_ERROR_MASK (SER_OVERRUN_MASK | SER_PAR_ERR_MASK | SER_FRAMING_ERR_MASK)
  3455. +
  3456. +/* Values for info->errorcode */
  3457. +#define ERRCODE_SET_BREAK (TTY_BREAK)
  3458. +#define ERRCODE_INSERT 0x100
  3459. +#define ERRCODE_INSERT_BREAK (ERRCODE_INSERT | TTY_BREAK)
  3460. +
  3461. +#define FORCE_EOP(info) *R_SET_EOP = 1U << info->iseteop;
  3462. +
  3463. +/*
  3464. + * General note regarding the use of IO_* macros in this file:
  3465. + *
  3466. + * We will use the bits defined for DMA channel 6 when using various
  3467. + * IO_* macros (e.g. IO_STATE, IO_MASK, IO_EXTRACT) and _assume_ they are
  3468. + * the same for all channels (which of course they are).
  3469. + *
  3470. + * We will also use the bits defined for serial port 0 when writing commands
  3471. + * to the different ports, as these bits too are the same for all ports.
  3472. + */
  3473. +
  3474. +
  3475. +/* Mask for the irqs possibly enabled in R_IRQ_MASK1_RD etc. */
  3476. +static const unsigned long e100_ser_int_mask = 0
  3477. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  3478. +| IO_MASK(R_IRQ_MASK1_RD, ser0_data) | IO_MASK(R_IRQ_MASK1_RD, ser0_ready)
  3479. +#endif
  3480. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  3481. +| IO_MASK(R_IRQ_MASK1_RD, ser1_data) | IO_MASK(R_IRQ_MASK1_RD, ser1_ready)
  3482. +#endif
  3483. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  3484. +| IO_MASK(R_IRQ_MASK1_RD, ser2_data) | IO_MASK(R_IRQ_MASK1_RD, ser2_ready)
  3485. +#endif
  3486. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  3487. +| IO_MASK(R_IRQ_MASK1_RD, ser3_data) | IO_MASK(R_IRQ_MASK1_RD, ser3_ready)
  3488. +#endif
  3489. +;
  3490. +unsigned long r_alt_ser_baudrate_shadow = 0;
  3491. +
  3492. +/* this is the data for the four serial ports in the etrax100 */
  3493. +/* DMA2(ser2), DMA4(ser3), DMA6(ser0) or DMA8(ser1) */
  3494. +/* R_DMA_CHx_CLR_INTR, R_DMA_CHx_FIRST, R_DMA_CHx_CMD */
  3495. +
  3496. +static struct e100_serial rs_table[] = {
  3497. + { .baud = DEF_BAUD,
  3498. + .ioport = (unsigned char *)R_SERIAL0_CTRL,
  3499. + .irq = 1U << 12, /* uses DMA 6 and 7 */
  3500. + .oclrintradr = R_DMA_CH6_CLR_INTR,
  3501. + .ofirstadr = R_DMA_CH6_FIRST,
  3502. + .ocmdadr = R_DMA_CH6_CMD,
  3503. + .ostatusadr = R_DMA_CH6_STATUS,
  3504. + .iclrintradr = R_DMA_CH7_CLR_INTR,
  3505. + .ifirstadr = R_DMA_CH7_FIRST,
  3506. + .icmdadr = R_DMA_CH7_CMD,
  3507. + .idescradr = R_DMA_CH7_DESCR,
  3508. + .flags = STD_FLAGS,
  3509. + .rx_ctrl = DEF_RX,
  3510. + .tx_ctrl = DEF_TX,
  3511. + .iseteop = 2,
  3512. + .dma_owner = dma_ser0,
  3513. + .io_if = if_serial_0,
  3514. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  3515. + .enabled = 1,
  3516. +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
  3517. + .dma_out_enabled = 1,
  3518. + .dma_out_nbr = SER0_TX_DMA_NBR,
  3519. + .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
  3520. + .dma_out_irq_flags = IRQF_DISABLED,
  3521. + .dma_out_irq_description = "serial 0 dma tr",
  3522. +#else
  3523. + .dma_out_enabled = 0,
  3524. + .dma_out_nbr = UINT_MAX,
  3525. + .dma_out_irq_nbr = 0,
  3526. + .dma_out_irq_flags = 0,
  3527. + .dma_out_irq_description = NULL,
  3528. +#endif
  3529. +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
  3530. + .dma_in_enabled = 1,
  3531. + .dma_in_nbr = SER0_RX_DMA_NBR,
  3532. + .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
  3533. + .dma_in_irq_flags = IRQF_DISABLED,
  3534. + .dma_in_irq_description = "serial 0 dma rec",
  3535. +#else
  3536. + .dma_in_enabled = 0,
  3537. + .dma_in_nbr = UINT_MAX,
  3538. + .dma_in_irq_nbr = 0,
  3539. + .dma_in_irq_flags = 0,
  3540. + .dma_in_irq_description = NULL,
  3541. +#endif
  3542. +#else
  3543. + .enabled = 0,
  3544. + .io_if_description = NULL,
  3545. + .dma_out_enabled = 0,
  3546. + .dma_in_enabled = 0
  3547. +#endif
  3548. +
  3549. +}, /* ttyS0 */
  3550. +#ifndef CONFIG_SVINTO_SIM
  3551. + { .baud = DEF_BAUD,
  3552. + .ioport = (unsigned char *)R_SERIAL1_CTRL,
  3553. + .irq = 1U << 16, /* uses DMA 8 and 9 */
  3554. + .oclrintradr = R_DMA_CH8_CLR_INTR,
  3555. + .ofirstadr = R_DMA_CH8_FIRST,
  3556. + .ocmdadr = R_DMA_CH8_CMD,
  3557. + .ostatusadr = R_DMA_CH8_STATUS,
  3558. + .iclrintradr = R_DMA_CH9_CLR_INTR,
  3559. + .ifirstadr = R_DMA_CH9_FIRST,
  3560. + .icmdadr = R_DMA_CH9_CMD,
  3561. + .idescradr = R_DMA_CH9_DESCR,
  3562. + .flags = STD_FLAGS,
  3563. + .rx_ctrl = DEF_RX,
  3564. + .tx_ctrl = DEF_TX,
  3565. + .iseteop = 3,
  3566. + .dma_owner = dma_ser1,
  3567. + .io_if = if_serial_1,
  3568. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  3569. + .enabled = 1,
  3570. + .io_if_description = "ser1",
  3571. +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
  3572. + .dma_out_enabled = 1,
  3573. + .dma_out_nbr = SER1_TX_DMA_NBR,
  3574. + .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
  3575. + .dma_out_irq_flags = IRQF_DISABLED,
  3576. + .dma_out_irq_description = "serial 1 dma tr",
  3577. +#else
  3578. + .dma_out_enabled = 0,
  3579. + .dma_out_nbr = UINT_MAX,
  3580. + .dma_out_irq_nbr = 0,
  3581. + .dma_out_irq_flags = 0,
  3582. + .dma_out_irq_description = NULL,
  3583. +#endif
  3584. +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
  3585. + .dma_in_enabled = 1,
  3586. + .dma_in_nbr = SER1_RX_DMA_NBR,
  3587. + .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
  3588. + .dma_in_irq_flags = IRQF_DISABLED,
  3589. + .dma_in_irq_description = "serial 1 dma rec",
  3590. +#else
3591. + .dma_in_enabled = 0,
  3593. + .dma_in_nbr = UINT_MAX,
  3594. + .dma_in_irq_nbr = 0,
  3595. + .dma_in_irq_flags = 0,
  3596. + .dma_in_irq_description = NULL,
  3597. +#endif
  3598. +#else
  3599. + .enabled = 0,
  3600. + .io_if_description = NULL,
  3601. + .dma_in_irq_nbr = 0,
  3602. + .dma_out_enabled = 0,
  3603. + .dma_in_enabled = 0
  3604. +#endif
  3605. +}, /* ttyS1 */
  3606. +
  3607. + { .baud = DEF_BAUD,
  3608. + .ioport = (unsigned char *)R_SERIAL2_CTRL,
  3609. + .irq = 1U << 4, /* uses DMA 2 and 3 */
  3610. + .oclrintradr = R_DMA_CH2_CLR_INTR,
  3611. + .ofirstadr = R_DMA_CH2_FIRST,
  3612. + .ocmdadr = R_DMA_CH2_CMD,
  3613. + .ostatusadr = R_DMA_CH2_STATUS,
  3614. + .iclrintradr = R_DMA_CH3_CLR_INTR,
  3615. + .ifirstadr = R_DMA_CH3_FIRST,
  3616. + .icmdadr = R_DMA_CH3_CMD,
  3617. + .idescradr = R_DMA_CH3_DESCR,
  3618. + .flags = STD_FLAGS,
  3619. + .rx_ctrl = DEF_RX,
  3620. + .tx_ctrl = DEF_TX,
  3621. + .iseteop = 0,
  3622. + .dma_owner = dma_ser2,
  3623. + .io_if = if_serial_2,
  3624. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  3625. + .enabled = 1,
  3626. + .io_if_description = "ser2",
  3627. +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
  3628. + .dma_out_enabled = 1,
  3629. + .dma_out_nbr = SER2_TX_DMA_NBR,
  3630. + .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
  3631. + .dma_out_irq_flags = IRQF_DISABLED,
  3632. + .dma_out_irq_description = "serial 2 dma tr",
  3633. +#else
  3634. + .dma_out_enabled = 0,
  3635. + .dma_out_nbr = UINT_MAX,
  3636. + .dma_out_irq_nbr = 0,
  3637. + .dma_out_irq_flags = 0,
  3638. + .dma_out_irq_description = NULL,
  3639. +#endif
  3640. +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
  3641. + .dma_in_enabled = 1,
  3642. + .dma_in_nbr = SER2_RX_DMA_NBR,
  3643. + .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
  3644. + .dma_in_irq_flags = IRQF_DISABLED,
  3645. + .dma_in_irq_description = "serial 2 dma rec",
  3646. +#else
  3647. + .dma_in_enabled = 0,
  3648. + .dma_in_nbr = UINT_MAX,
  3649. + .dma_in_irq_nbr = 0,
  3650. + .dma_in_irq_flags = 0,
  3651. + .dma_in_irq_description = NULL,
  3652. +#endif
  3653. +#else
  3654. + .enabled = 0,
  3655. + .io_if_description = NULL,
  3656. + .dma_out_enabled = 0,
  3657. + .dma_in_enabled = 0
  3658. +#endif
  3659. + }, /* ttyS2 */
  3660. +
  3661. + { .baud = DEF_BAUD,
  3662. + .ioport = (unsigned char *)R_SERIAL3_CTRL,
  3663. + .irq = 1U << 8, /* uses DMA 4 and 5 */
  3664. + .oclrintradr = R_DMA_CH4_CLR_INTR,
  3665. + .ofirstadr = R_DMA_CH4_FIRST,
  3666. + .ocmdadr = R_DMA_CH4_CMD,
  3667. + .ostatusadr = R_DMA_CH4_STATUS,
  3668. + .iclrintradr = R_DMA_CH5_CLR_INTR,
  3669. + .ifirstadr = R_DMA_CH5_FIRST,
  3670. + .icmdadr = R_DMA_CH5_CMD,
  3671. + .idescradr = R_DMA_CH5_DESCR,
  3672. + .flags = STD_FLAGS,
  3673. + .rx_ctrl = DEF_RX,
  3674. + .tx_ctrl = DEF_TX,
  3675. + .iseteop = 1,
  3676. + .dma_owner = dma_ser3,
  3677. + .io_if = if_serial_3,
  3678. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  3679. + .enabled = 1,
  3680. + .io_if_description = "ser3",
  3681. +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
  3682. + .dma_out_enabled = 1,
  3683. + .dma_out_nbr = SER3_TX_DMA_NBR,
  3684. + .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
  3685. + .dma_out_irq_flags = IRQF_DISABLED,
  3686. + .dma_out_irq_description = "serial 3 dma tr",
  3687. +#else
  3688. + .dma_out_enabled = 0,
  3689. + .dma_out_nbr = UINT_MAX,
  3690. + .dma_out_irq_nbr = 0,
  3691. + .dma_out_irq_flags = 0,
  3692. + .dma_out_irq_description = NULL,
  3693. +#endif
  3694. +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
  3695. + .dma_in_enabled = 1,
  3696. + .dma_in_nbr = SER3_RX_DMA_NBR,
  3697. + .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
  3698. + .dma_in_irq_flags = IRQF_DISABLED,
  3699. + .dma_in_irq_description = "serial 3 dma rec",
  3700. +#else
  3701. + .dma_in_enabled = 0,
  3702. + .dma_in_nbr = UINT_MAX,
  3703. + .dma_in_irq_nbr = 0,
  3704. + .dma_in_irq_flags = 0,
  3705. + .dma_in_irq_description = NULL
  3706. +#endif
  3707. +#else
  3708. + .enabled = 0,
  3709. + .io_if_description = NULL,
  3710. + .dma_out_enabled = 0,
  3711. + .dma_in_enabled = 0
  3712. +#endif
  3713. + } /* ttyS3 */
  3714. +#endif
  3715. +};
  3716. +
  3717. +
  3718. +#define NR_PORTS (sizeof(rs_table)/sizeof(struct e100_serial))
  3719. +
  3720. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  3721. +static struct fast_timer fast_timers[NR_PORTS];
  3722. +#endif
  3723. +
  3724. +#ifdef CONFIG_ETRAX_SERIAL_PROC_ENTRY
  3725. +#define PROCSTAT(x) x
  3726. +struct ser_statistics_type {
  3727. + int overrun_cnt;
  3728. + int early_errors_cnt;
  3729. + int ser_ints_ok_cnt;
  3730. + int errors_cnt;
  3731. + unsigned long int processing_flip;
  3732. + unsigned long processing_flip_still_room;
  3733. + unsigned long int timeout_flush_cnt;
  3734. + int rx_dma_ints;
  3735. + int tx_dma_ints;
  3736. + int rx_tot;
  3737. + int tx_tot;
  3738. +};
  3739. +
  3740. +static struct ser_statistics_type ser_stat[NR_PORTS];
  3741. +
  3742. +#else
  3743. +
  3744. +#define PROCSTAT(x)
  3745. +
  3746. +#endif /* CONFIG_ETRAX_SERIAL_PROC_ENTRY */
  3747. +
  3748. +/* RS-485 */
  3749. +#if defined(CONFIG_ETRAX_RS485)
  3750. +#ifdef CONFIG_ETRAX_FAST_TIMER
  3751. +static struct fast_timer fast_timers_rs485[NR_PORTS];
  3752. +#endif
  3753. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  3754. +static int rs485_pa_bit = CONFIG_ETRAX_RS485_ON_PA_BIT;
  3755. +#endif
  3756. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  3757. +static int rs485_port_g_bit = CONFIG_ETRAX_RS485_ON_PORT_G_BIT;
  3758. +#endif
  3759. +#endif
  3760. +
3761. +/* Info and macros needed for each port's extra control/status signals. */
  3762. +#define E100_STRUCT_PORT(line, pinname) \
  3763. + ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
  3764. + (R_PORT_PA_DATA): ( \
  3765. + (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
  3766. + (R_PORT_PB_DATA):&dummy_ser[line]))
  3767. +
  3768. +#define E100_STRUCT_SHADOW(line, pinname) \
  3769. + ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
  3770. + (&port_pa_data_shadow): ( \
  3771. + (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
  3772. + (&port_pb_data_shadow):&dummy_ser[line]))
  3773. +#define E100_STRUCT_MASK(line, pinname) \
  3774. + ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
  3775. + (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT): ( \
  3776. + (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
  3777. + (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT):DUMMY_##pinname##_MASK))
  3778. +
  3779. +#define DUMMY_DTR_MASK 1
  3780. +#define DUMMY_RI_MASK 2
  3781. +#define DUMMY_DSR_MASK 4
  3782. +#define DUMMY_CD_MASK 8
  3783. +static unsigned char dummy_ser[NR_PORTS] = {0xFF, 0xFF, 0xFF,0xFF};
  3784. +
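The E100_STRUCT_* macros above pick, at compile time, either the PA register, the PB register or a per-line dummy byte, depending on which *_ON_Px_BIT config value is >= 0 (a value of -1 means the signal is not on that port). A small sketch of the same selection, with hypothetical config values defined locally:

#include <stdio.h>

/* Hypothetical config values: DTR for line 0 is on PB bit 4, not on PA. */
#define CONFIG_SER0_DTR_ON_PA_BIT (-1)
#define CONFIG_SER0_DTR_ON_PB_BIT 4

static unsigned char port_pa_data, port_pb_data, dummy_ser0;

/* Same shape as E100_STRUCT_PORT()/E100_STRUCT_MASK(): PA wins if its bit
 * is >= 0, otherwise PB, otherwise the dummy byte. */
#define STRUCT_PORT() \
	((CONFIG_SER0_DTR_ON_PA_BIT >= 0) ? &port_pa_data : \
	 (CONFIG_SER0_DTR_ON_PB_BIT >= 0) ? &port_pb_data : &dummy_ser0)

#define STRUCT_MASK() \
	((CONFIG_SER0_DTR_ON_PA_BIT >= 0) ? (1 << CONFIG_SER0_DTR_ON_PA_BIT) : \
	 (CONFIG_SER0_DTR_ON_PB_BIT >= 0) ? (1 << CONFIG_SER0_DTR_ON_PB_BIT) : 1)

int main(void)
{
	printf("port is %s, mask 0x%02x\n",
	       STRUCT_PORT() == &port_pb_data ? "PB" : "PA/dummy",
	       (unsigned)STRUCT_MASK());
	return 0;
}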
  3785. +/* If not all status pins are used or disabled, use mixed mode */
  3786. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  3787. +
  3788. +#define SER0_PA_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PA_BIT+CONFIG_ETRAX_SER0_RI_ON_PA_BIT+CONFIG_ETRAX_SER0_DSR_ON_PA_BIT+CONFIG_ETRAX_SER0_CD_ON_PA_BIT)
  3789. +
  3790. +#if SER0_PA_BITSUM != -4
  3791. +# if CONFIG_ETRAX_SER0_DTR_ON_PA_BIT == -1
  3792. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3793. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3794. +# endif
  3795. +# endif
  3796. +# if CONFIG_ETRAX_SER0_RI_ON_PA_BIT == -1
  3797. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3798. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3799. +# endif
  3800. +# endif
  3801. +# if CONFIG_ETRAX_SER0_DSR_ON_PA_BIT == -1
  3802. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3803. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3804. +# endif
  3805. +# endif
  3806. +# if CONFIG_ETRAX_SER0_CD_ON_PA_BIT == -1
  3807. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3808. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3809. +# endif
  3810. +# endif
  3811. +#endif
  3812. +
  3813. +#define SER0_PB_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PB_BIT+CONFIG_ETRAX_SER0_RI_ON_PB_BIT+CONFIG_ETRAX_SER0_DSR_ON_PB_BIT+CONFIG_ETRAX_SER0_CD_ON_PB_BIT)
  3814. +
  3815. +#if SER0_PB_BITSUM != -4
  3816. +# if CONFIG_ETRAX_SER0_DTR_ON_PB_BIT == -1
  3817. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3818. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3819. +# endif
  3820. +# endif
  3821. +# if CONFIG_ETRAX_SER0_RI_ON_PB_BIT == -1
  3822. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3823. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3824. +# endif
  3825. +# endif
  3826. +# if CONFIG_ETRAX_SER0_DSR_ON_PB_BIT == -1
  3827. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3828. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3829. +# endif
  3830. +# endif
  3831. +# if CONFIG_ETRAX_SER0_CD_ON_PB_BIT == -1
  3832. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  3833. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  3834. +# endif
  3835. +# endif
  3836. +#endif
  3837. +
  3838. +#endif /* PORT0 */
  3839. +
  3840. +
  3841. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  3842. +
  3843. +#define SER1_PA_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PA_BIT+CONFIG_ETRAX_SER1_RI_ON_PA_BIT+CONFIG_ETRAX_SER1_DSR_ON_PA_BIT+CONFIG_ETRAX_SER1_CD_ON_PA_BIT)
  3844. +
  3845. +#if SER1_PA_BITSUM != -4
  3846. +# if CONFIG_ETRAX_SER1_DTR_ON_PA_BIT == -1
  3847. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3848. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3849. +# endif
  3850. +# endif
  3851. +# if CONFIG_ETRAX_SER1_RI_ON_PA_BIT == -1
  3852. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3853. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3854. +# endif
  3855. +# endif
  3856. +# if CONFIG_ETRAX_SER1_DSR_ON_PA_BIT == -1
  3857. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3858. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3859. +# endif
  3860. +# endif
  3861. +# if CONFIG_ETRAX_SER1_CD_ON_PA_BIT == -1
  3862. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3863. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3864. +# endif
  3865. +# endif
  3866. +#endif
  3867. +
  3868. +#define SER1_PB_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PB_BIT+CONFIG_ETRAX_SER1_RI_ON_PB_BIT+CONFIG_ETRAX_SER1_DSR_ON_PB_BIT+CONFIG_ETRAX_SER1_CD_ON_PB_BIT)
  3869. +
  3870. +#if SER1_PB_BITSUM != -4
  3871. +# if CONFIG_ETRAX_SER1_DTR_ON_PB_BIT == -1
  3872. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3873. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3874. +# endif
  3875. +# endif
  3876. +# if CONFIG_ETRAX_SER1_RI_ON_PB_BIT == -1
  3877. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3878. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3879. +# endif
  3880. +# endif
  3881. +# if CONFIG_ETRAX_SER1_DSR_ON_PB_BIT == -1
  3882. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3883. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3884. +# endif
  3885. +# endif
  3886. +# if CONFIG_ETRAX_SER1_CD_ON_PB_BIT == -1
  3887. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  3888. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  3889. +# endif
  3890. +# endif
  3891. +#endif
  3892. +
  3893. +#endif /* PORT1 */
  3894. +
  3895. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  3896. +
  3897. +#define SER2_PA_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PA_BIT+CONFIG_ETRAX_SER2_RI_ON_PA_BIT+CONFIG_ETRAX_SER2_DSR_ON_PA_BIT+CONFIG_ETRAX_SER2_CD_ON_PA_BIT)
  3898. +
  3899. +#if SER2_PA_BITSUM != -4
  3900. +# if CONFIG_ETRAX_SER2_DTR_ON_PA_BIT == -1
  3901. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3902. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3903. +# endif
  3904. +# endif
  3905. +# if CONFIG_ETRAX_SER2_RI_ON_PA_BIT == -1
  3906. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3907. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3908. +# endif
  3909. +# endif
  3910. +# if CONFIG_ETRAX_SER2_DSR_ON_PA_BIT == -1
  3911. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3912. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3913. +# endif
  3914. +# endif
  3915. +# if CONFIG_ETRAX_SER2_CD_ON_PA_BIT == -1
  3916. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3917. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3918. +# endif
  3919. +# endif
  3920. +#endif
  3921. +
  3922. +#define SER2_PB_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PB_BIT+CONFIG_ETRAX_SER2_RI_ON_PB_BIT+CONFIG_ETRAX_SER2_DSR_ON_PB_BIT+CONFIG_ETRAX_SER2_CD_ON_PB_BIT)
  3923. +
  3924. +#if SER2_PB_BITSUM != -4
  3925. +# if CONFIG_ETRAX_SER2_DTR_ON_PB_BIT == -1
  3926. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3927. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3928. +# endif
  3929. +# endif
  3930. +# if CONFIG_ETRAX_SER2_RI_ON_PB_BIT == -1
  3931. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3932. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3933. +# endif
  3934. +# endif
  3935. +# if CONFIG_ETRAX_SER2_DSR_ON_PB_BIT == -1
  3936. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3937. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3938. +# endif
  3939. +# endif
  3940. +# if CONFIG_ETRAX_SER2_CD_ON_PB_BIT == -1
  3941. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  3942. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  3943. +# endif
  3944. +# endif
  3945. +#endif
  3946. +
  3947. +#endif /* PORT2 */
  3948. +
  3949. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  3950. +
  3951. +#define SER3_PA_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PA_BIT+CONFIG_ETRAX_SER3_RI_ON_PA_BIT+CONFIG_ETRAX_SER3_DSR_ON_PA_BIT+CONFIG_ETRAX_SER3_CD_ON_PA_BIT)
  3952. +
  3953. +#if SER3_PA_BITSUM != -4
  3954. +# if CONFIG_ETRAX_SER3_DTR_ON_PA_BIT == -1
  3955. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3956. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3957. +# endif
  3958. +# endif
  3959. +# if CONFIG_ETRAX_SER3_RI_ON_PA_BIT == -1
  3960. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3961. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3962. +# endif
  3963. +# endif
  3964. +# if CONFIG_ETRAX_SER3_DSR_ON_PA_BIT == -1
  3965. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3966. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3967. +# endif
  3968. +# endif
  3969. +# if CONFIG_ETRAX_SER3_CD_ON_PA_BIT == -1
  3970. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3971. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3972. +# endif
  3973. +# endif
  3974. +#endif
  3975. +
  3976. +#define SER3_PB_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PB_BIT+CONFIG_ETRAX_SER3_RI_ON_PB_BIT+CONFIG_ETRAX_SER3_DSR_ON_PB_BIT+CONFIG_ETRAX_SER3_CD_ON_PB_BIT)
  3977. +
  3978. +#if SER3_PB_BITSUM != -4
  3979. +# if CONFIG_ETRAX_SER3_DTR_ON_PB_BIT == -1
  3980. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3981. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3982. +# endif
  3983. +# endif
  3984. +# if CONFIG_ETRAX_SER3_RI_ON_PB_BIT == -1
  3985. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3986. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3987. +# endif
  3988. +# endif
  3989. +# if CONFIG_ETRAX_SER3_DSR_ON_PB_BIT == -1
  3990. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3991. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3992. +# endif
  3993. +# endif
  3994. +# if CONFIG_ETRAX_SER3_CD_ON_PB_BIT == -1
  3995. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  3996. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  3997. +# endif
  3998. +# endif
  3999. +#endif
  4000. +
  4001. +#endif /* PORT3 */
  4002. +
  4003. +
  4004. +#if defined(CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED) || \
  4005. + defined(CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED) || \
  4006. + defined(CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED) || \
  4007. + defined(CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED)
  4008. +#define CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
  4009. +#endif
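The *_BITSUM checks above rely on the fact that an unused pin is configured as -1, so a port with all four pins unconfigured sums to exactly -4; any other sum means at least one pin is configured, and if one of the four is still -1 that serial port needs the mixed PA/PB layout. A tiny illustration of the arithmetic, with a hypothetical bit assignment:

#include <stdio.h>

/* Hypothetical assignment: DTR on PA bit 4, the other three pins unused. */
#define DTR_ON_PA_BIT 4
#define RI_ON_PA_BIT  (-1)
#define DSR_ON_PA_BIT (-1)
#define CD_ON_PA_BIT  (-1)

#define PA_BITSUM (DTR_ON_PA_BIT + RI_ON_PA_BIT + DSR_ON_PA_BIT + CD_ON_PA_BIT)

int main(void)
{
	/* -4 would mean "nothing on PA at all"; here the sum is 1. */
	printf("PA_BITSUM = %d\n", PA_BITSUM);

#if PA_BITSUM != -4
# if RI_ON_PA_BIT == -1
	/* At least one pin on PA and at least one not: mixed mode is needed. */
	printf("mixed PA/PB control-pin layout selected\n");
# endif
#endif
	return 0;
}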
  4010. +
  4011. +#ifdef CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
  4012. +/* The pins can be mixed on PA and PB */
  4013. +#define CONTROL_PINS_PORT_NOT_USED(line) \
  4014. + &dummy_ser[line], &dummy_ser[line], \
  4015. + &dummy_ser[line], &dummy_ser[line], \
  4016. + &dummy_ser[line], &dummy_ser[line], \
  4017. + &dummy_ser[line], &dummy_ser[line], \
  4018. + DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
  4019. +
  4020. +
  4021. +struct control_pins
  4022. +{
  4023. + volatile unsigned char *dtr_port;
  4024. + unsigned char *dtr_shadow;
  4025. + volatile unsigned char *ri_port;
  4026. + unsigned char *ri_shadow;
  4027. + volatile unsigned char *dsr_port;
  4028. + unsigned char *dsr_shadow;
  4029. + volatile unsigned char *cd_port;
  4030. + unsigned char *cd_shadow;
  4031. +
  4032. + unsigned char dtr_mask;
  4033. + unsigned char ri_mask;
  4034. + unsigned char dsr_mask;
  4035. + unsigned char cd_mask;
  4036. +};
  4037. +
  4038. +static const struct control_pins e100_modem_pins[NR_PORTS] =
  4039. +{
  4040. + /* Ser 0 */
  4041. + {
  4042. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  4043. + E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
  4044. + E100_STRUCT_PORT(0,RI), E100_STRUCT_SHADOW(0,RI),
  4045. + E100_STRUCT_PORT(0,DSR), E100_STRUCT_SHADOW(0,DSR),
  4046. + E100_STRUCT_PORT(0,CD), E100_STRUCT_SHADOW(0,CD),
  4047. + E100_STRUCT_MASK(0,DTR),
  4048. + E100_STRUCT_MASK(0,RI),
  4049. + E100_STRUCT_MASK(0,DSR),
  4050. + E100_STRUCT_MASK(0,CD)
  4051. +#else
  4052. + CONTROL_PINS_PORT_NOT_USED(0)
  4053. +#endif
  4054. + },
  4055. +
  4056. + /* Ser 1 */
  4057. + {
  4058. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  4059. + E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
  4060. + E100_STRUCT_PORT(1,RI), E100_STRUCT_SHADOW(1,RI),
  4061. + E100_STRUCT_PORT(1,DSR), E100_STRUCT_SHADOW(1,DSR),
  4062. + E100_STRUCT_PORT(1,CD), E100_STRUCT_SHADOW(1,CD),
  4063. + E100_STRUCT_MASK(1,DTR),
  4064. + E100_STRUCT_MASK(1,RI),
  4065. + E100_STRUCT_MASK(1,DSR),
  4066. + E100_STRUCT_MASK(1,CD)
  4067. +#else
  4068. + CONTROL_PINS_PORT_NOT_USED(1)
  4069. +#endif
  4070. + },
  4071. +
  4072. + /* Ser 2 */
  4073. + {
  4074. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  4075. + E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
  4076. + E100_STRUCT_PORT(2,RI), E100_STRUCT_SHADOW(2,RI),
  4077. + E100_STRUCT_PORT(2,DSR), E100_STRUCT_SHADOW(2,DSR),
  4078. + E100_STRUCT_PORT(2,CD), E100_STRUCT_SHADOW(2,CD),
  4079. + E100_STRUCT_MASK(2,DTR),
  4080. + E100_STRUCT_MASK(2,RI),
  4081. + E100_STRUCT_MASK(2,DSR),
  4082. + E100_STRUCT_MASK(2,CD)
  4083. +#else
  4084. + CONTROL_PINS_PORT_NOT_USED(2)
  4085. +#endif
  4086. + },
  4087. +
  4088. + /* Ser 3 */
  4089. + {
  4090. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  4091. + E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
  4092. + E100_STRUCT_PORT(3,RI), E100_STRUCT_SHADOW(3,RI),
  4093. + E100_STRUCT_PORT(3,DSR), E100_STRUCT_SHADOW(3,DSR),
  4094. + E100_STRUCT_PORT(3,CD), E100_STRUCT_SHADOW(3,CD),
  4095. + E100_STRUCT_MASK(3,DTR),
  4096. + E100_STRUCT_MASK(3,RI),
  4097. + E100_STRUCT_MASK(3,DSR),
  4098. + E100_STRUCT_MASK(3,CD)
  4099. +#else
  4100. + CONTROL_PINS_PORT_NOT_USED(3)
  4101. +#endif
  4102. + }
  4103. +};
  4104. +#else /* CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
  4105. +
  4106. +/* All pins are on either PA or PB for each serial port */
  4107. +#define CONTROL_PINS_PORT_NOT_USED(line) \
  4108. + &dummy_ser[line], &dummy_ser[line], \
  4109. + DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
  4110. +
  4111. +
  4112. +struct control_pins
  4113. +{
  4114. + volatile unsigned char *port;
  4115. + unsigned char *shadow;
  4116. +
  4117. + unsigned char dtr_mask;
  4118. + unsigned char ri_mask;
  4119. + unsigned char dsr_mask;
  4120. + unsigned char cd_mask;
  4121. +};
  4122. +
  4123. +#define dtr_port port
  4124. +#define dtr_shadow shadow
  4125. +#define ri_port port
  4126. +#define ri_shadow shadow
  4127. +#define dsr_port port
  4128. +#define dsr_shadow shadow
  4129. +#define cd_port port
  4130. +#define cd_shadow shadow
  4131. +
  4132. +static const struct control_pins e100_modem_pins[NR_PORTS] =
  4133. +{
  4134. + /* Ser 0 */
  4135. + {
  4136. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  4137. + E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
  4138. + E100_STRUCT_MASK(0,DTR),
  4139. + E100_STRUCT_MASK(0,RI),
  4140. + E100_STRUCT_MASK(0,DSR),
  4141. + E100_STRUCT_MASK(0,CD)
  4142. +#else
  4143. + CONTROL_PINS_PORT_NOT_USED(0)
  4144. +#endif
  4145. + },
  4146. +
  4147. + /* Ser 1 */
  4148. + {
  4149. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  4150. + E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
  4151. + E100_STRUCT_MASK(1,DTR),
  4152. + E100_STRUCT_MASK(1,RI),
  4153. + E100_STRUCT_MASK(1,DSR),
  4154. + E100_STRUCT_MASK(1,CD)
  4155. +#else
  4156. + CONTROL_PINS_PORT_NOT_USED(1)
  4157. +#endif
  4158. + },
  4159. +
  4160. + /* Ser 2 */
  4161. + {
  4162. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  4163. + E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
  4164. + E100_STRUCT_MASK(2,DTR),
  4165. + E100_STRUCT_MASK(2,RI),
  4166. + E100_STRUCT_MASK(2,DSR),
  4167. + E100_STRUCT_MASK(2,CD)
  4168. +#else
  4169. + CONTROL_PINS_PORT_NOT_USED(2)
  4170. +#endif
  4171. + },
  4172. +
  4173. + /* Ser 3 */
  4174. + {
  4175. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  4176. + E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
  4177. + E100_STRUCT_MASK(3,DTR),
  4178. + E100_STRUCT_MASK(3,RI),
  4179. + E100_STRUCT_MASK(3,DSR),
  4180. + E100_STRUCT_MASK(3,CD)
  4181. +#else
  4182. + CONTROL_PINS_PORT_NOT_USED(3)
  4183. +#endif
  4184. + }
  4185. +};
  4186. +#endif /* !CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
  4187. +
  4188. +#define E100_RTS_MASK 0x20
  4189. +#define E100_CTS_MASK 0x40
  4190. +
  4191. +/* All serial port signals are active low:
  4192. + * active = 0 -> 3.3V to RS-232 driver -> -12V on RS-232 level
  4193. + * inactive = 1 -> 0V to RS-232 driver -> +12V on RS-232 level
  4194. + *
4195. + * These macros return the pin value: 0=0V, >=1 = 3.3V on ETRAX chip
  4196. + */
  4197. +
  4198. +/* Output */
  4199. +#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK)
  4200. +/* Input */
  4201. +#define E100_CTS_GET(info) ((info)->ioport[REG_STATUS] & E100_CTS_MASK)
  4202. +
  4203. +/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */
  4204. +/* Is an output */
  4205. +#define E100_DTR_GET(info) ((*e100_modem_pins[(info)->line].dtr_shadow) & e100_modem_pins[(info)->line].dtr_mask)
  4206. +
  4207. +/* Normally inputs */
  4208. +#define E100_RI_GET(info) ((*e100_modem_pins[(info)->line].ri_port) & e100_modem_pins[(info)->line].ri_mask)
  4209. +#define E100_CD_GET(info) ((*e100_modem_pins[(info)->line].cd_port) & e100_modem_pins[(info)->line].cd_mask)
  4210. +
  4211. +/* Input */
  4212. +#define E100_DSR_GET(info) ((*e100_modem_pins[(info)->line].dsr_port) & e100_modem_pins[(info)->line].dsr_mask)
  4213. +
  4214. +
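Because the modem signals are active low, a raw pin value of 0 means the signal is asserted. The sketch below shows how a status byte would typically be tested with such a mask; the status value is made up for the example, while the mask matches E100_CTS_MASK above.

#include <stdio.h>

#define CTS_MASK 0x40  /* same mask value as E100_CTS_MASK */

int main(void)
{
	unsigned char status = 0x3f;        /* CTS bit reads as 0            */
	int cts_pin = status & CTS_MASK;    /* what E100_CTS_GET would yield */

	/* active low: pin value 0 means the peer asserts CTS */
	printf("CTS %s\n", cts_pin ? "deasserted" : "asserted");
	return 0;
}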
  4215. +/*
  4216. + * tmp_buf is used as a temporary buffer by serial_write. We need to
  4217. + * lock it in case the memcpy_fromfs blocks while swapping in a page,
  4218. + * and some other program tries to do a serial write at the same time.
  4219. + * Since the lock will only come under contention when the system is
  4220. + * swapping and available memory is low, it makes sense to share one
  4221. + * buffer across all the serial ports, since it significantly saves
  4222. + * memory if large numbers of serial ports are open.
  4223. + */
  4224. +static unsigned char *tmp_buf;
  4225. +static DEFINE_MUTEX(tmp_buf_mutex);
  4226. +
4227. +/* Calculate the char time depending on baudrate, number of bits etc. */
  4228. +static void update_char_time(struct e100_serial * info)
  4229. +{
  4230. + tcflag_t cflags = info->port.tty->termios->c_cflag;
  4231. + int bits;
  4232. +
  4233. + /* calc. number of bits / data byte */
  4234. + /* databits + startbit and 1 stopbit */
  4235. + if ((cflags & CSIZE) == CS7)
  4236. + bits = 9;
  4237. + else
  4238. + bits = 10;
  4239. +
  4240. + if (cflags & CSTOPB) /* 2 stopbits ? */
  4241. + bits++;
  4242. +
  4243. + if (cflags & PARENB) /* parity bit ? */
  4244. + bits++;
  4245. +
  4246. + /* calc timeout */
  4247. + info->char_time_usec = ((bits * 1000000) / info->baud) + 1;
  4248. + info->flush_time_usec = 4*info->char_time_usec;
  4249. + if (info->flush_time_usec < MIN_FLUSH_TIME_USEC)
  4250. + info->flush_time_usec = MIN_FLUSH_TIME_USEC;
  4251. +
  4252. +}
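As a worked example of the formula above: at 115200 baud with 8 data bits, no parity and one stop bit, a character occupies 10 bit times, so char_time_usec becomes 10*1000000/115200 + 1 = 87 us, and flush_time_usec (4 character times) is then clamped to at least MIN_FLUSH_TIME_USEC. A standalone check of the arithmetic:

#include <stdio.h>

#define MIN_FLUSH_TIME_USEC 250

int main(void)
{
	int baud = 115200;
	int bits = 10;                 /* start bit + 8 data bits + stop bit */
	int char_time_usec = ((bits * 1000000) / baud) + 1;
	int flush_time_usec = 4 * char_time_usec;

	if (flush_time_usec < MIN_FLUSH_TIME_USEC)
		flush_time_usec = MIN_FLUSH_TIME_USEC;

	/* prints: char_time=87 us, flush_time=348 us */
	printf("char_time=%d us, flush_time=%d us\n",
	       char_time_usec, flush_time_usec);
	return 0;
}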
  4253. +
  4254. +/*
  4255. + * This function maps from the Bxxxx defines in asm/termbits.h into real
  4256. + * baud rates.
  4257. + */
  4258. +
  4259. +static int
  4260. +cflag_to_baud(unsigned int cflag)
  4261. +{
  4262. + static int baud_table[] = {
  4263. + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
  4264. + 4800, 9600, 19200, 38400 };
  4265. +
  4266. + static int ext_baud_table[] = {
  4267. + 0, 57600, 115200, 230400, 460800, 921600, 1843200, 6250000,
  4268. + 0, 0, 0, 0, 0, 0, 0, 0 };
  4269. +
  4270. + if (cflag & CBAUDEX)
  4271. + return ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
  4272. + else
  4273. + return baud_table[cflag & CBAUD];
  4274. +}
  4275. +
  4276. +/* and this maps to an etrax100 hardware baud constant */
  4277. +
  4278. +static unsigned char
  4279. +cflag_to_etrax_baud(unsigned int cflag)
  4280. +{
  4281. + char retval;
  4282. +
  4283. + static char baud_table[] = {
  4284. + -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, 4, 5, 6, 7 };
  4285. +
  4286. + static char ext_baud_table[] = {
  4287. + -1, 8, 9, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, -1, -1 };
  4288. +
  4289. + if (cflag & CBAUDEX)
  4290. + retval = ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
  4291. + else
  4292. + retval = baud_table[cflag & CBAUD];
  4293. +
  4294. + if (retval < 0) {
  4295. + printk(KERN_WARNING "serdriver tried setting invalid baud rate, flags %x.\n", cflag);
  4296. + retval = 5; /* choose default 9600 instead */
  4297. + }
  4298. +
  4299. + return retval | (retval << 4); /* choose same for both TX and RX */
  4300. +}
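cflag_to_etrax_baud() packs the same 4-bit hardware baud code into both nibbles of the returned byte, so receiver and transmitter run at the same rate. The sketch below shows the packing for table entry 9, which the tables above associate with 115200; the constant is taken from the table purely for illustration.

#include <stdio.h>

/* Duplicate a 4-bit baud code into both nibbles, as cflag_to_etrax_baud()
 * does for the RX and TX halves of the baud register. */
static unsigned char pack_baud(unsigned char code)
{
	return code | (code << 4);
}

int main(void)
{
	unsigned char code = 9;                 /* 115200 in the table above */

	printf("packed baud byte = 0x%02x\n", pack_baud(code));  /* 0x99 */
	return 0;
}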
  4301. +
  4302. +
  4303. +/* Various static support functions */
  4304. +
  4305. +/* Functions to set or clear DTR/RTS on the requested line */
  4306. +/* It is complicated by the fact that RTS is a serial port register, while
  4307. + * DTR might not be implemented in the HW at all, and if it is, it can be on
  4308. + * any general port.
  4309. + */
  4310. +
  4311. +
  4312. +static inline void
  4313. +e100_dtr(struct e100_serial *info, int set)
  4314. +{
  4315. +#ifndef CONFIG_SVINTO_SIM
  4316. + unsigned char mask = e100_modem_pins[info->line].dtr_mask;
  4317. +
  4318. +#ifdef SERIAL_DEBUG_IO
  4319. + printk("ser%i dtr %i mask: 0x%02X\n", info->line, set, mask);
  4320. + printk("ser%i shadow before 0x%02X get: %i\n",
  4321. + info->line, *e100_modem_pins[info->line].dtr_shadow,
  4322. + E100_DTR_GET(info));
  4323. +#endif
  4324. + /* DTR is active low */
  4325. + {
  4326. + unsigned long flags;
  4327. +
  4328. + local_irq_save(flags);
  4329. + *e100_modem_pins[info->line].dtr_shadow &= ~mask;
  4330. + *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
  4331. + *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
  4332. + local_irq_restore(flags);
  4333. + }
  4334. +
  4335. +#ifdef SERIAL_DEBUG_IO
  4336. + printk("ser%i shadow after 0x%02X get: %i\n",
  4337. + info->line, *e100_modem_pins[info->line].dtr_shadow,
  4338. + E100_DTR_GET(info));
  4339. +#endif
  4340. +#endif
  4341. +}
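e100_dtr() and the ri/cd helpers below all follow the same shadow-register pattern: update a RAM copy of the write-only port register with interrupts disabled, then write the whole shadow back out. A self-contained sketch of that pattern, with an ordinary variable standing in for the hardware port:

#include <stdio.h>

static volatile unsigned char fake_port;   /* stands in for *R_PORT_Px_DATA  */
static unsigned char port_shadow;          /* RAM copy of the write-only reg */

/* Active-low output: set != 0 drives the pin low (bit cleared),
 * set == 0 releases it (bit set), just like e100_dtr(). */
static void set_active_low(unsigned char mask, int set)
{
	/* a real driver would hold local_irq_save() around this section */
	port_shadow &= ~mask;
	port_shadow |= (set ? 0 : mask);
	fake_port = port_shadow;
}

int main(void)
{
	set_active_low(0x08, 1);   /* assert: bit 3 cleared */
	printf("port=0x%02x\n", (unsigned)fake_port);
	set_active_low(0x08, 0);   /* deassert: bit 3 set   */
	printf("port=0x%02x\n", (unsigned)fake_port);
	return 0;
}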
  4342. +
  4343. +/* set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
  4344. + * 0=0V , 1=3.3V
  4345. + */
  4346. +static inline void
  4347. +e100_rts(struct e100_serial *info, int set)
  4348. +{
  4349. +#ifndef CONFIG_SVINTO_SIM
  4350. + unsigned long flags;
  4351. + local_irq_save(flags);
  4352. + info->rx_ctrl &= ~E100_RTS_MASK;
  4353. + info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
  4354. + info->ioport[REG_REC_CTRL] = info->rx_ctrl;
  4355. + local_irq_restore(flags);
  4356. +#ifdef SERIAL_DEBUG_IO
  4357. + printk("ser%i rts %i\n", info->line, set);
  4358. +#endif
  4359. +#endif
  4360. +}
  4361. +
  4362. +
  4363. +/* If this behaves as a modem, RI and CD is an output */
  4364. +static inline void
  4365. +e100_ri_out(struct e100_serial *info, int set)
  4366. +{
  4367. +#ifndef CONFIG_SVINTO_SIM
  4368. + /* RI is active low */
  4369. + {
  4370. + unsigned char mask = e100_modem_pins[info->line].ri_mask;
  4371. + unsigned long flags;
  4372. +
  4373. + local_irq_save(flags);
  4374. + *e100_modem_pins[info->line].ri_shadow &= ~mask;
  4375. + *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
  4376. + *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
  4377. + local_irq_restore(flags);
  4378. + }
  4379. +#endif
  4380. +}
  4381. +static inline void
  4382. +e100_cd_out(struct e100_serial *info, int set)
  4383. +{
  4384. +#ifndef CONFIG_SVINTO_SIM
  4385. + /* CD is active low */
  4386. + {
  4387. + unsigned char mask = e100_modem_pins[info->line].cd_mask;
  4388. + unsigned long flags;
  4389. +
  4390. + local_irq_save(flags);
  4391. + *e100_modem_pins[info->line].cd_shadow &= ~mask;
  4392. + *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
  4393. + *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
  4394. + local_irq_restore(flags);
  4395. + }
  4396. +#endif
  4397. +}
  4398. +
  4399. +static inline void
  4400. +e100_disable_rx(struct e100_serial *info)
  4401. +{
  4402. +#ifndef CONFIG_SVINTO_SIM
  4403. + /* disable the receiver */
  4404. + info->ioport[REG_REC_CTRL] =
  4405. + (info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
  4406. +#endif
  4407. +}
  4408. +
  4409. +static inline void
  4410. +e100_enable_rx(struct e100_serial *info)
  4411. +{
  4412. +#ifndef CONFIG_SVINTO_SIM
  4413. + /* enable the receiver */
  4414. + info->ioport[REG_REC_CTRL] =
  4415. + (info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
  4416. +#endif
  4417. +}
  4418. +
  4419. +/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
  4420. +
  4421. +static inline void
  4422. +e100_disable_rxdma_irq(struct e100_serial *info)
  4423. +{
  4424. +#ifdef SERIAL_DEBUG_INTR
  4425. + printk("rxdma_irq(%d): 0\n",info->line);
  4426. +#endif
  4427. + DINTR1(DEBUG_LOG(info->line,"IRQ disable_rxdma_irq %i\n", info->line));
  4428. + *R_IRQ_MASK2_CLR = (info->irq << 2) | (info->irq << 3);
  4429. +}
  4430. +
  4431. +static inline void
  4432. +e100_enable_rxdma_irq(struct e100_serial *info)
  4433. +{
  4434. +#ifdef SERIAL_DEBUG_INTR
  4435. + printk("rxdma_irq(%d): 1\n",info->line);
  4436. +#endif
  4437. + DINTR1(DEBUG_LOG(info->line,"IRQ enable_rxdma_irq %i\n", info->line));
  4438. + *R_IRQ_MASK2_SET = (info->irq << 2) | (info->irq << 3);
  4439. +}
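The rx path masks two interrupt sources per port, derived from info->irq by shifting: (irq << 2) for dma_descr and (irq << 3) for dma_eop, while the tx path (below) uses info->irq itself. For ttyS0, whose .irq field is 1 << 12, that gives bits 14 and 15. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long irq = 1UL << 12;               /* ttyS0's .irq value */
	unsigned long rx_mask = (irq << 2) | (irq << 3);

	/* prints: tx mask 0x1000, rx mask 0xc000 (bits 14 and 15) */
	printf("tx mask 0x%lx, rx mask 0x%lx\n", irq, rx_mask);
	return 0;
}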
  4440. +
  4441. +/* the tx DMA uses only dma_descr interrupt */
  4442. +
  4443. +static void e100_disable_txdma_irq(struct e100_serial *info)
  4444. +{
  4445. +#ifdef SERIAL_DEBUG_INTR
  4446. + printk("txdma_irq(%d): 0\n",info->line);
  4447. +#endif
  4448. + DINTR1(DEBUG_LOG(info->line,"IRQ disable_txdma_irq %i\n", info->line));
  4449. + *R_IRQ_MASK2_CLR = info->irq;
  4450. +}
  4451. +
  4452. +static void e100_enable_txdma_irq(struct e100_serial *info)
  4453. +{
  4454. +#ifdef SERIAL_DEBUG_INTR
  4455. + printk("txdma_irq(%d): 1\n",info->line);
  4456. +#endif
  4457. + DINTR1(DEBUG_LOG(info->line,"IRQ enable_txdma_irq %i\n", info->line));
  4458. + *R_IRQ_MASK2_SET = info->irq;
  4459. +}
  4460. +
  4461. +static void e100_disable_txdma_channel(struct e100_serial *info)
  4462. +{
  4463. + unsigned long flags;
  4464. +
  4465. + /* Disable output DMA channel for the serial port in question
  4466. + * ( set to something other than serialX)
  4467. + */
  4468. + local_irq_save(flags);
  4469. + DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
  4470. + if (info->line == 0) {
  4471. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
  4472. + IO_STATE(R_GEN_CONFIG, dma6, serial0)) {
  4473. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
  4474. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
  4475. + }
  4476. + } else if (info->line == 1) {
  4477. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma8)) ==
  4478. + IO_STATE(R_GEN_CONFIG, dma8, serial1)) {
  4479. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
  4480. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
  4481. + }
  4482. + } else if (info->line == 2) {
  4483. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma2)) ==
  4484. + IO_STATE(R_GEN_CONFIG, dma2, serial2)) {
  4485. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
  4486. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
  4487. + }
  4488. + } else if (info->line == 3) {
  4489. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma4)) ==
  4490. + IO_STATE(R_GEN_CONFIG, dma4, serial3)) {
  4491. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
  4492. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
  4493. + }
  4494. + }
  4495. + *R_GEN_CONFIG = genconfig_shadow;
  4496. + local_irq_restore(flags);
  4497. +}
  4498. +
  4499. +
  4500. +static void e100_enable_txdma_channel(struct e100_serial *info)
  4501. +{
  4502. + unsigned long flags;
  4503. +
  4504. + local_irq_save(flags);
  4505. + DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
  4506. + /* Enable output DMA channel for the serial port in question */
  4507. + if (info->line == 0) {
  4508. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
  4509. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, serial0);
  4510. + } else if (info->line == 1) {
  4511. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
  4512. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, serial1);
  4513. + } else if (info->line == 2) {
  4514. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
  4515. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, serial2);
  4516. + } else if (info->line == 3) {
  4517. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
  4518. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
  4519. + }
  4520. + *R_GEN_CONFIG = genconfig_shadow;
  4521. + local_irq_restore(flags);
  4522. +}
  4523. +
  4524. +static void e100_disable_rxdma_channel(struct e100_serial *info)
  4525. +{
  4526. + unsigned long flags;
  4527. +
  4528. + /* Disable input DMA channel for the serial port in question
  4529. + * ( set to something other than serialX)
  4530. + */
  4531. + local_irq_save(flags);
  4532. + if (info->line == 0) {
  4533. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
  4534. + IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
  4535. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
  4536. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, unused);
  4537. + }
  4538. + } else if (info->line == 1) {
  4539. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma9)) ==
  4540. + IO_STATE(R_GEN_CONFIG, dma9, serial1)) {
  4541. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
  4542. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, usb);
  4543. + }
  4544. + } else if (info->line == 2) {
  4545. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma3)) ==
  4546. + IO_STATE(R_GEN_CONFIG, dma3, serial2)) {
  4547. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
  4548. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
  4549. + }
  4550. + } else if (info->line == 3) {
  4551. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma5)) ==
  4552. + IO_STATE(R_GEN_CONFIG, dma5, serial3)) {
  4553. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
  4554. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
  4555. + }
  4556. + }
  4557. + *R_GEN_CONFIG = genconfig_shadow;
  4558. + local_irq_restore(flags);
  4559. +}
  4560. +
  4561. +
  4562. +static void e100_enable_rxdma_channel(struct e100_serial *info)
  4563. +{
  4564. + unsigned long flags;
  4565. +
  4566. + local_irq_save(flags);
  4567. + /* Enable input DMA channel for the serial port in question */
  4568. + if (info->line == 0) {
  4569. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
  4570. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, serial0);
  4571. + } else if (info->line == 1) {
  4572. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
  4573. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, serial1);
  4574. + } else if (info->line == 2) {
  4575. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
  4576. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, serial2);
  4577. + } else if (info->line == 3) {
  4578. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
  4579. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
  4580. + }
  4581. + *R_GEN_CONFIG = genconfig_shadow;
  4582. + local_irq_restore(flags);
  4583. +}
  4584. +
  4585. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  4586. +/* in order to detect and fix errors on the first byte
  4587. + we have to use the serial interrupts as well. */
  4588. +
  4589. +static inline void
  4590. +e100_disable_serial_data_irq(struct e100_serial *info)
  4591. +{
  4592. +#ifdef SERIAL_DEBUG_INTR
  4593. + printk("ser_irq(%d): 0\n",info->line);
  4594. +#endif
  4595. + DINTR1(DEBUG_LOG(info->line,"IRQ disable data_irq %i\n", info->line));
  4596. + *R_IRQ_MASK1_CLR = (1U << (8+2*info->line));
  4597. +}
  4598. +
  4599. +static inline void
  4600. +e100_enable_serial_data_irq(struct e100_serial *info)
  4601. +{
  4602. +#ifdef SERIAL_DEBUG_INTR
  4603. + printk("ser_irq(%d): 1\n",info->line);
  4604. + printk("**** %d = %d\n",
  4605. + (8+2*info->line),
  4606. + (1U << (8+2*info->line)));
  4607. +#endif
  4608. + DINTR1(DEBUG_LOG(info->line,"IRQ enable data_irq %i\n", info->line));
  4609. + *R_IRQ_MASK1_SET = (1U << (8+2*info->line));
  4610. +}
  4611. +#endif
  4612. +
  4613. +static inline void
  4614. +e100_disable_serial_tx_ready_irq(struct e100_serial *info)
  4615. +{
  4616. +#ifdef SERIAL_DEBUG_INTR
  4617. + printk("ser_tx_irq(%d): 0\n",info->line);
  4618. +#endif
  4619. + DINTR1(DEBUG_LOG(info->line,"IRQ disable ready_irq %i\n", info->line));
  4620. + *R_IRQ_MASK1_CLR = (1U << (8+1+2*info->line));
  4621. +}
  4622. +
  4623. +static inline void
  4624. +e100_enable_serial_tx_ready_irq(struct e100_serial *info)
  4625. +{
  4626. +#ifdef SERIAL_DEBUG_INTR
  4627. + printk("ser_tx_irq(%d): 1\n",info->line);
  4628. + printk("**** %d = %d\n",
  4629. + (8+1+2*info->line),
  4630. + (1U << (8+1+2*info->line)));
  4631. +#endif
  4632. + DINTR2(DEBUG_LOG(info->line,"IRQ enable ready_irq %i\n", info->line));
  4633. + *R_IRQ_MASK1_SET = (1U << (8+1+2*info->line));
  4634. +}
  4635. +
  4636. +static inline void e100_enable_rx_irq(struct e100_serial *info)
  4637. +{
  4638. + if (info->uses_dma_in)
  4639. + e100_enable_rxdma_irq(info);
  4640. + else
  4641. + e100_enable_serial_data_irq(info);
  4642. +}
  4643. +static inline void e100_disable_rx_irq(struct e100_serial *info)
  4644. +{
  4645. + if (info->uses_dma_in)
  4646. + e100_disable_rxdma_irq(info);
  4647. + else
  4648. + e100_disable_serial_data_irq(info);
  4649. +}
  4650. +
  4651. +#if defined(CONFIG_ETRAX_RS485)
  4652. +/* Enable RS-485 mode on selected port. This is UGLY. */
  4653. +static int
  4654. +e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
  4655. +{
  4656. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  4657. +
  4658. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  4659. + *R_PORT_PA_DATA = port_pa_data_shadow |= (1 << rs485_pa_bit);
  4660. +#endif
  4661. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  4662. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  4663. + rs485_port_g_bit, 1);
  4664. +#endif
  4665. +#if defined(CONFIG_ETRAX_RS485_LTC1387)
  4666. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  4667. + CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 1);
  4668. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  4669. + CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
  4670. +#endif
  4671. +
  4672. + info->rs485 = *r;
  4673. +
4674. + /* Cap the delay before RTS at a maximum of 1000 */
  4675. + if (info->rs485.delay_rts_before_send >= 1000)
  4676. + info->rs485.delay_rts_before_send = 1000;
  4677. +
  4678. +/* printk("rts: on send = %i, after = %i, enabled = %i",
  4679. + info->rs485.rts_on_send,
  4680. + info->rs485.rts_after_sent,
  4681. + info->rs485.enabled
  4682. + );
  4683. +*/
  4684. + return 0;
  4685. +}
  4686. +
  4687. +static int
  4688. +e100_write_rs485(struct tty_struct *tty,
  4689. + const unsigned char *buf, int count)
  4690. +{
  4691. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  4692. + int old_value = (info->rs485.flags) & SER_RS485_ENABLED;
  4693. +
  4694. + /* rs485 is always implicitly enabled if we're using the ioctl()
4695. + * but it does not have to be set in the serial_rs485 structure
4696. + * (to stay backward compatible with old apps),
4697. + * so we store, set and restore it.
  4698. + */
  4699. + info->rs485.flags |= SER_RS485_ENABLED;
  4700. + /* rs_write now deals with RS485 if enabled */
  4701. + count = rs_write(tty, buf, count);
  4702. + if (!old_value)
  4703. + info->rs485.flags &= ~(SER_RS485_ENABLED);
  4704. + return count;
  4705. +}
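The store/set/restore dance above is a common way to force a flag for the duration of a single call without disturbing the caller's configuration. Sketched in isolation, with a made-up flag name standing in for SER_RS485_ENABLED:

#include <stdio.h>

#define FLAG_ENABLED 0x01   /* stand-in for SER_RS485_ENABLED */

static unsigned int flags;

static int do_write(const char *buf, int count)
{
	/* here the flag is guaranteed to be set */
	printf("writing %d bytes, flags=0x%x: %s\n", count, flags, buf);
	return count;
}

/* Temporarily force FLAG_ENABLED, as e100_write_rs485() does for
 * SER_RS485_ENABLED, and clear it again afterwards if it was clear. */
static int write_forced(const char *buf, int count)
{
	int old_value = flags & FLAG_ENABLED;

	flags |= FLAG_ENABLED;
	count = do_write(buf, count);
	if (!old_value)
		flags &= ~FLAG_ENABLED;
	return count;
}

int main(void)
{
	write_forced("hi", 2);
	printf("flags afterwards: 0x%x\n", flags);   /* back to 0 */
	return 0;
}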
  4706. +
  4707. +#ifdef CONFIG_ETRAX_FAST_TIMER
  4708. +/* Timer function to toggle RTS when using FAST_TIMER */
  4709. +static void rs485_toggle_rts_timer_function(unsigned long data)
  4710. +{
  4711. + struct e100_serial *info = (struct e100_serial *)data;
  4712. +
  4713. + fast_timers_rs485[info->line].function = NULL;
  4714. + e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
  4715. +#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
  4716. + e100_enable_rx(info);
  4717. + e100_enable_rx_irq(info);
  4718. +#endif
  4719. +}
  4720. +#endif
  4721. +#endif /* CONFIG_ETRAX_RS485 */
  4722. +
  4723. +/*
  4724. + * ------------------------------------------------------------
  4725. + * rs_stop() and rs_start()
  4726. + *
4727. + * These routines are called before setting or resetting tty->stopped.
4728. + * They enable or disable the transmitter using the XOFF registers, as necessary.
  4729. + * ------------------------------------------------------------
  4730. + */
  4731. +
  4732. +static void
  4733. +rs_stop(struct tty_struct *tty)
  4734. +{
  4735. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  4736. + if (info) {
  4737. + unsigned long flags;
  4738. + unsigned long xoff;
  4739. +
  4740. + local_irq_save(flags);
  4741. + DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
  4742. + CIRC_CNT(info->xmit.head,
  4743. + info->xmit.tail,SERIAL_XMIT_SIZE)));
  4744. +
  4745. + xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
  4746. + STOP_CHAR(info->port.tty));
  4747. + xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
  4748. + if (tty->termios->c_iflag & IXON ) {
  4749. + xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
  4750. + }
  4751. +
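+ /* R_SERIALx_XOFF is a single 32-bit register, so the assembled xoff word
+ * (xoff char, tx_stop, auto_xoff) is written in one store below. */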
  4752. + *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
  4753. + local_irq_restore(flags);
  4754. + }
  4755. +}
  4756. +
  4757. +static void
  4758. +rs_start(struct tty_struct *tty)
  4759. +{
  4760. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  4761. + if (info) {
  4762. + unsigned long flags;
  4763. + unsigned long xoff;
  4764. +
  4765. + local_irq_save(flags);
  4766. + DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
  4767. + CIRC_CNT(info->xmit.head,
  4768. + info->xmit.tail,SERIAL_XMIT_SIZE)));
  4769. + xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
  4770. + xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
  4771. + if (tty->termios->c_iflag & IXON ) {
  4772. + xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
  4773. + }
  4774. +
  4775. + *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
  4776. + if (!info->uses_dma_out &&
  4777. + info->xmit.head != info->xmit.tail && info->xmit.buf)
  4778. + e100_enable_serial_tx_ready_irq(info);
  4779. +
  4780. + local_irq_restore(flags);
  4781. + }
  4782. +}
  4783. +
  4784. +/*
  4785. + * ----------------------------------------------------------------------
  4786. + *
  4787. + * Here starts the interrupt handling routines. All of the following
  4788. + * subroutines are declared as inline and are folded into
  4789. + * rs_interrupt(). They were separated out for readability's sake.
  4790. + *
  4791. + * Note: rs_interrupt() is a "fast" interrupt, which means that it
  4792. + * runs with interrupts turned off. People who may want to modify
  4793. + * rs_interrupt() should try to keep the interrupt handler as fast as
  4794. + * possible. After you are done making modifications, it is not a bad
  4795. + * idea to do:
  4796. + *
  4797. + * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
  4798. + *
  4799. + * and look at the resulting assembly code in serial.s.
  4800. + *
  4801. + * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
  4802. + * -----------------------------------------------------------------------
  4803. + */
  4804. +
  4805. +/*
  4806. + * This routine is used by the interrupt handler to schedule
  4807. + * processing in the software interrupt portion of the driver.
  4808. + */
  4809. +static void rs_sched_event(struct e100_serial *info, int event)
  4810. +{
  4811. + if (info->event & (1 << event))
  4812. + return;
  4813. + info->event |= 1 << event;
  4814. + schedule_work(&info->work);
  4815. +}
  4816. +
  4817. +/* The output DMA channel is free - use it to send as many chars as possible
  4818. + * NOTES:
  4819. + * We don't pay attention to info->x_char, which means if the TTY wants to
  4820. + * use XON/XOFF it will set info->x_char but we won't send any X char!
  4821. + *
  4822. + * To implement this, we'd just start a DMA send of 1 byte pointing at a
  4823. + * buffer containing the X char, and skip updating xmit. We'd also have to
  4824. + * check if the last sent char was the X char when we enter this function
  4825. + * the next time, to avoid updating xmit with the sent X value.
  4826. + */
  4827. +
  4828. +static void
  4829. +transmit_chars_dma(struct e100_serial *info)
  4830. +{
  4831. + unsigned int c, sentl;
  4832. + struct etrax_dma_descr *descr;
  4833. +
  4834. +#ifdef CONFIG_SVINTO_SIM
  4835. + /* This will output too little if tail is not always 0, since
  4836. + * we don't loop back to send the other part. Anyway, this SHOULD be a
  4837. + * no-op - transmit_chars_dma would never really be called during sim,
  4838. + * since rs_write does not write into the xmit buffer then.
  4839. + */
  4840. + if (info->xmit.tail)
  4841. + printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
  4842. + if (info->xmit.head != info->xmit.tail) {
  4843. + SIMCOUT(info->xmit.buf + info->xmit.tail,
  4844. + CIRC_CNT(info->xmit.head,
  4845. + info->xmit.tail,
  4846. + SERIAL_XMIT_SIZE));
  4847. + info->xmit.head = info->xmit.tail; /* move back head */
  4848. + info->tr_running = 0;
  4849. + }
  4850. + return;
  4851. +#endif
  4852. + /* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
  4853. + *info->oclrintradr =
  4854. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  4855. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  4856. +
  4857. +#ifdef SERIAL_DEBUG_INTR
  4858. + if (info->line == SERIAL_DEBUG_LINE)
  4859. + printk("tc\n");
  4860. +#endif
  4861. + if (!info->tr_running) {
  4862. + /* weirdo... we shouldn't get here! */
  4863. + printk(KERN_WARNING "Warning: transmit_chars_dma with !tr_running\n");
  4864. + return;
  4865. + }
  4866. +
  4867. + descr = &info->tr_descr;
  4868. +
  4869. + /* first get the number of bytes sent during the last DMA transfer,
  4870. + and update xmit accordingly */
  4871. +
  4872. + /* if the descriptor's stop flag was not set, all data has been sent */
  4873. + if (!(descr->status & d_stop)) {
  4874. + sentl = descr->sw_len;
  4875. + } else
  4876. + /* otherwise we find the amount of data sent here */
  4877. + sentl = descr->hw_len;
  4878. +
  4879. + DFLOW(DEBUG_LOG(info->line, "TX %i done\n", sentl));
  4880. +
  4881. + /* update stats */
  4882. + info->icount.tx += sentl;
  4883. +
  4884. + /* update xmit buffer */
  4885. + info->xmit.tail = (info->xmit.tail + sentl) & (SERIAL_XMIT_SIZE - 1);
  4886. +
  4887. + /* if there are only a few chars left in the buffer, wake up any
  4888. + blocked write */
  4889. + if (CIRC_CNT(info->xmit.head,
  4890. + info->xmit.tail,
  4891. + SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
  4892. + rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
  4893. +
  4894. + /* find out the largest amount of consecutive bytes we want to send now */
  4895. +
  4896. + c = CIRC_CNT_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  4897. +
  4898. + /* Don't send it all in one DMA transfer - divide it so we wake up
  4899. + * the application before everything has been sent
  4900. + */
  4901. +
  4902. + if (c >= 4*WAKEUP_CHARS)
  4903. + c = c/2;
  4904. +
  4905. + if (c <= 0) {
  4906. + /* our job here is done, don't schedule any new DMA transfer */
  4907. + info->tr_running = 0;
  4908. +
  4909. +#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
  4910. + if (info->rs485.flags & SER_RS485_ENABLED) {
  4911. + /* Set a short timer to toggle RTS */
  4912. + start_one_shot_timer(&fast_timers_rs485[info->line],
  4913. + rs485_toggle_rts_timer_function,
  4914. + (unsigned long)info,
  4915. + info->char_time_usec*2,
  4916. + "RS-485");
  4917. + }
  4918. +#endif /* RS485 */
  4919. + return;
  4920. + }
  4921. +
  4922. + /* ok we can schedule a dma send of c chars starting at info->xmit.tail */
  4923. + /* set up the descriptor correctly for output */
  4924. + DFLOW(DEBUG_LOG(info->line, "TX %i\n", c));
  4925. + descr->ctrl = d_int | d_eol | d_wait; /* Wait needed for tty_wait_until_sent() */
  4926. + descr->sw_len = c;
  4927. + descr->buf = virt_to_phys(info->xmit.buf + info->xmit.tail);
  4928. + descr->status = 0;
  4929. +
  4930. + *info->ofirstadr = virt_to_phys(descr); /* write to R_DMAx_FIRST */
  4931. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
  4932. +
  4933. + /* DMA is now running (hopefully) */
  4934. +} /* transmit_chars_dma */
  4935. +
  4936. +static void
  4937. +start_transmit(struct e100_serial *info)
  4938. +{
  4939. +#if 0
  4940. + if (info->line == SERIAL_DEBUG_LINE)
  4941. + printk("x\n");
  4942. +#endif
  4943. +
  4944. + info->tr_descr.sw_len = 0;
  4945. + info->tr_descr.hw_len = 0;
  4946. + info->tr_descr.status = 0;
  4947. + info->tr_running = 1;
  4948. + if (info->uses_dma_out)
  4949. + transmit_chars_dma(info);
  4950. + else
  4951. + e100_enable_serial_tx_ready_irq(info);
  4952. +} /* start_transmit */
  4953. +
  4954. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  4955. +static int serial_fast_timer_started = 0;
  4956. +static int serial_fast_timer_expired = 0;
  4957. +static void flush_timeout_function(unsigned long data);
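+/* Start a one-shot flush timer for this port unless one is already pending;
+ * a NULL ->function in the fast_timers slot means no timer is running. */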
  4958. +#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
  4959. + unsigned long timer_flags; \
  4960. + local_irq_save(timer_flags); \
  4961. + if (fast_timers[info->line].function == NULL) { \
  4962. + serial_fast_timer_started++; \
  4963. + TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
  4964. + TIMERD(DEBUG_LOG(info->line, "num started: %i\n", serial_fast_timer_started)); \
  4965. + start_one_shot_timer(&fast_timers[info->line], \
  4966. + flush_timeout_function, \
  4967. + (unsigned long)info, \
  4968. + (usec), \
  4969. + string); \
  4970. + } \
  4971. + else { \
  4972. + TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
  4973. + } \
  4974. + local_irq_restore(timer_flags); \
  4975. +}
  4976. +#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
  4977. +
  4978. +#else
  4979. +#define START_FLUSH_FAST_TIMER_TIME(info, string, usec)
  4980. +#define START_FLUSH_FAST_TIMER(info, string)
  4981. +#endif
  4982. +
  4983. +static struct etrax_recv_buffer *
  4984. +alloc_recv_buffer(unsigned int size)
  4985. +{
  4986. + struct etrax_recv_buffer *buffer;
  4987. +
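+ /* The bookkeeping header and the data area are allocated in one chunk;
+ * buffer->buffer is the data part that is later handed to the DMA. */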
  4988. + if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
  4989. + return NULL;
  4990. +
  4991. + buffer->next = NULL;
  4992. + buffer->length = 0;
  4993. + buffer->error = TTY_NORMAL;
  4994. +
  4995. + return buffer;
  4996. +}
  4997. +
  4998. +static void
  4999. +append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
  5000. +{
  5001. + unsigned long flags;
  5002. +
  5003. + local_irq_save(flags);
  5004. +
  5005. + if (!info->first_recv_buffer)
  5006. + info->first_recv_buffer = buffer;
  5007. + else
  5008. + info->last_recv_buffer->next = buffer;
  5009. +
  5010. + info->last_recv_buffer = buffer;
  5011. +
  5012. + info->recv_cnt += buffer->length;
  5013. + if (info->recv_cnt > info->max_recv_cnt)
  5014. + info->max_recv_cnt = info->recv_cnt;
  5015. +
  5016. + local_irq_restore(flags);
  5017. +}
  5018. +
  5019. +static int
  5020. +add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char flag)
  5021. +{
  5022. + struct etrax_recv_buffer *buffer;
  5023. + if (info->uses_dma_in) {
  5024. + if (!(buffer = alloc_recv_buffer(4)))
  5025. + return 0;
  5026. +
  5027. + buffer->length = 1;
  5028. + buffer->error = flag;
  5029. + buffer->buffer[0] = data;
  5030. +
  5031. + append_recv_buffer(info, buffer);
  5032. +
  5033. + info->icount.rx++;
  5034. + } else {
  5035. + struct tty_struct *tty = info->port.tty;
  5036. + tty_insert_flip_char(tty, data, flag);
  5037. + info->icount.rx++;
  5038. + }
  5039. +
  5040. + return 1;
  5041. +}
  5042. +
  5043. +static unsigned int handle_descr_data(struct e100_serial *info,
  5044. + struct etrax_dma_descr *descr,
  5045. + unsigned int recvl)
  5046. +{
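+ /* The data area directly follows the etrax_recv_buffer header, so stepping
+ * back sizeof *buffer from the DMA buffer address recovers the header that
+ * alloc_recv_buffer() allocated. */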
  5047. + struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;
  5048. +
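+ /* Cap the amount of buffered receive data at 64 KiB; anything beyond that
+ * is dropped (see the printk below). */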
  5049. + if (info->recv_cnt + recvl > 65536) {
  5050. + printk(KERN_CRIT
  5051. + "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
  5052. + return 0;
  5053. + }
  5054. +
  5055. + buffer->length = recvl;
  5056. +
  5057. + if (info->errorcode == ERRCODE_SET_BREAK)
  5058. + buffer->error = TTY_BREAK;
  5059. + info->errorcode = 0;
  5060. +
  5061. + append_recv_buffer(info, buffer);
  5062. +
  5063. + if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
  5064. + panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
  5065. +
  5066. + descr->buf = virt_to_phys(buffer->buffer);
  5067. +
  5068. + return recvl;
  5069. +}
  5070. +
  5071. +static unsigned int handle_all_descr_data(struct e100_serial *info)
  5072. +{
  5073. + struct etrax_dma_descr *descr;
  5074. + unsigned int recvl;
  5075. + unsigned int ret = 0;
  5076. +
  5077. + while (1)
  5078. + {
  5079. + descr = &info->rec_descr[info->cur_rec_descr];
  5080. +
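+ /* info->idescradr points at the input channel's descriptor register, which
+ * holds the physical address of the descriptor the DMA is currently
+ * working on; stop once we catch up with it. */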
  5081. + if (descr == phys_to_virt(*info->idescradr))
  5082. + break;
  5083. +
  5084. + if (++info->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
  5085. + info->cur_rec_descr = 0;
  5086. +
  5087. + /* find out how many bytes were read */
  5088. +
  5089. + /* if the eop bit was not set, all data has been received */
  5090. + if (!(descr->status & d_eop)) {
  5091. + recvl = descr->sw_len;
  5092. + } else {
  5093. + /* otherwise we find the amount of data received here */
  5094. + recvl = descr->hw_len;
  5095. + }
  5096. +
  5097. + /* Reset the status information */
  5098. + descr->status = 0;
  5099. +
  5100. + DFLOW( DEBUG_LOG(info->line, "RX %lu\n", recvl);
  5101. + if (info->port.tty->stopped) {
  5102. + unsigned char *buf = phys_to_virt(descr->buf);
  5103. + DEBUG_LOG(info->line, "rx 0x%02X\n", buf[0]);
  5104. + DEBUG_LOG(info->line, "rx 0x%02X\n", buf[1]);
  5105. + DEBUG_LOG(info->line, "rx 0x%02X\n", buf[2]);
  5106. + }
  5107. + );
  5108. +
  5109. + /* update stats */
  5110. + info->icount.rx += recvl;
  5111. +
  5112. + ret += handle_descr_data(info, descr, recvl);
  5113. + }
  5114. +
  5115. + return ret;
  5116. +}
  5117. +
  5118. +static void receive_chars_dma(struct e100_serial *info)
  5119. +{
  5120. + struct tty_struct *tty;
  5121. + unsigned char rstat;
  5122. +
  5123. +#ifdef CONFIG_SVINTO_SIM
  5124. + /* No receive in the simulator. There probably will be once the rest of
  5125. + * the serial interface works, and then this piece will just be removed.
  5126. + */
  5127. + return;
  5128. +#endif
  5129. +
  5130. + /* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
  5131. + *info->iclrintradr =
  5132. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  5133. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  5134. +
  5135. + tty = info->port.tty;
  5136. + if (!tty) /* Something wrong... */
  5137. + return;
  5138. +
  5139. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  5140. + if (info->uses_dma_in)
  5141. + e100_enable_serial_data_irq(info);
  5142. +#endif
  5143. +
  5144. + if (info->errorcode == ERRCODE_INSERT_BREAK)
  5145. + add_char_and_flag(info, '\0', TTY_BREAK);
  5146. +
  5147. + handle_all_descr_data(info);
  5148. +
  5149. + /* Read the status register to detect errors */
  5150. + rstat = info->ioport[REG_STATUS];
  5151. + if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
  5152. + DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat));
  5153. + }
  5154. +
  5155. + if (rstat & SER_ERROR_MASK) {
  5156. + /* If we got an error, we must reset it by reading the
  5157. + * data_in field
  5158. + */
  5159. + unsigned char data = info->ioport[REG_DATA];
  5160. +
  5161. + PROCSTAT(ser_stat[info->line].errors_cnt++);
  5162. + DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
  5163. + ((rstat & SER_ERROR_MASK) << 8) | data);
  5164. +
  5165. + if (rstat & SER_PAR_ERR_MASK)
  5166. + add_char_and_flag(info, data, TTY_PARITY);
  5167. + else if (rstat & SER_OVERRUN_MASK)
  5168. + add_char_and_flag(info, data, TTY_OVERRUN);
  5169. + else if (rstat & SER_FRAMING_ERR_MASK)
  5170. + add_char_and_flag(info, data, TTY_FRAME);
  5171. + }
  5172. +
  5173. + START_FLUSH_FAST_TIMER(info, "receive_chars");
  5174. +
  5175. + /* Restart the receiving DMA */
  5176. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
  5177. +}
  5178. +
  5179. +static int start_recv_dma(struct e100_serial *info)
  5180. +{
  5181. + struct etrax_dma_descr *descr = info->rec_descr;
  5182. + struct etrax_recv_buffer *buffer;
  5183. + int i;
  5184. +
  5185. + /* Set up the receiving descriptors */
  5186. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
  5187. + if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
  5188. + panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
  5189. +
  5190. + descr[i].ctrl = d_int;
  5191. + descr[i].buf = virt_to_phys(buffer->buffer);
  5192. + descr[i].sw_len = SERIAL_DESCR_BUF_SIZE;
  5193. + descr[i].hw_len = 0;
  5194. + descr[i].status = 0;
  5195. + descr[i].next = virt_to_phys(&descr[i+1]);
  5196. + }
  5197. +
  5198. + /* Link the last descriptor to the first */
  5199. + descr[i-1].next = virt_to_phys(&descr[0]);
  5200. +
  5201. + /* Start with the first descriptor in the list */
  5202. + info->cur_rec_descr = 0;
  5203. +
  5204. + /* Start the DMA */
  5205. + *info->ifirstadr = virt_to_phys(&descr[info->cur_rec_descr]);
  5206. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
  5207. +
  5208. + /* Input DMA should be running now */
  5209. + return 1;
  5210. +}
  5211. +
  5212. +static void
  5213. +start_receive(struct e100_serial *info)
  5214. +{
  5215. +#ifdef CONFIG_SVINTO_SIM
  5216. + /* No receive in the simulator. There probably will be once the rest of
  5217. + * the serial interface works, and then this piece will just be removed.
  5218. + */
  5219. + return;
  5220. +#endif
  5221. + if (info->uses_dma_in) {
  5222. + /* reset the input dma channel to be sure it works */
  5223. +
  5224. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
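+ /* Wait until the reset cycle is complete; the cmd field reads back as
+ * 'reset' until then (same as in startup()). */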
  5225. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
  5226. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  5227. +
  5228. + start_recv_dma(info);
  5229. + }
  5230. +}
  5231. +
  5232. +
  5233. +/* the bits in the MASK2 register are laid out like this:
  5234. + DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR
  5235. + where I is the input channel and O is the output channel for the port.
  5236. + info->irq is the bit number for the DMAO_DESCR so to check the others we
  5237. + shift info->irq to the left.
  5238. +*/
  5239. +
  5240. +/* dma output channel interrupt handler
  5241. + this interrupt is called from DMA2(ser2), DMA4(ser3), DMA6(ser0) or
  5242. + DMA8(ser1) when they have finished a descriptor with the intr flag set.
  5243. +*/
  5244. +
  5245. +static irqreturn_t
  5246. +tr_interrupt(int irq, void *dev_id)
  5247. +{
  5248. + struct e100_serial *info;
  5249. + unsigned long ireg;
  5250. + int i;
  5251. + int handled = 0;
  5252. +
  5253. +#ifdef CONFIG_SVINTO_SIM
  5254. + /* No receive in the simulator. There probably will be once the rest of
  5255. + * the serial interface works, and then this piece will just be removed.
  5256. + */
  5257. + {
  5258. + const char *s = "What? tr_interrupt in simulator??\n";
  5259. + SIMCOUT(s,strlen(s));
  5260. + }
  5261. + return IRQ_HANDLED;
  5262. +#endif
  5263. +
  5264. + /* find out the line that caused this irq and get it from rs_table */
  5265. +
  5266. + ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
  5267. +
  5268. + for (i = 0; i < NR_PORTS; i++) {
  5269. + info = rs_table + i;
  5270. + if (!info->enabled || !info->uses_dma_out)
  5271. + continue;
  5272. + /* check for dma_descr (we don't need to check for dma_eop in output dma for serial) */
  5273. + if (ireg & info->irq) {
  5274. + handled = 1;
  5275. + /* we can send a new dma bunch. make it so. */
  5276. + DINTR2(DEBUG_LOG(info->line, "tr_interrupt %i\n", i));
  5277. + /* Read jiffies_usec first,
  5278. + * we want this time to be as late as possible
  5279. + */
  5280. + PROCSTAT(ser_stat[info->line].tx_dma_ints++);
  5281. + info->last_tx_active_usec = GET_JIFFIES_USEC();
  5282. + info->last_tx_active = jiffies;
  5283. + transmit_chars_dma(info);
  5284. + }
  5285. +
  5286. + /* FIXME: here we should really check for a change in the
  5287. + status lines and if so call status_handle(info) */
  5288. + }
  5289. + return IRQ_RETVAL(handled);
  5290. +} /* tr_interrupt */
  5291. +
  5292. +/* dma input channel interrupt handler */
  5293. +
  5294. +static irqreturn_t
  5295. +rec_interrupt(int irq, void *dev_id)
  5296. +{
  5297. + struct e100_serial *info;
  5298. + unsigned long ireg;
  5299. + int i;
  5300. + int handled = 0;
  5301. +
  5302. +#ifdef CONFIG_SVINTO_SIM
  5303. + /* No receive in the simulator. There probably will be once the rest of
  5304. + * the serial interface works, and then this piece will just be removed.
  5305. + */
  5306. + {
  5307. + const char *s = "What? rec_interrupt in simulator??\n";
  5308. + SIMCOUT(s,strlen(s));
  5309. + }
  5310. + return IRQ_HANDLED;
  5311. +#endif
  5312. +
  5313. + /* find out the line that caused this irq and get it from rs_table */
  5314. +
  5315. + ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
  5316. +
  5317. + for (i = 0; i < NR_PORTS; i++) {
  5318. + info = rs_table + i;
  5319. + if (!info->enabled || !info->uses_dma_in)
  5320. + continue;
  5321. + /* check for both dma_eop and dma_descr for the input dma channel */
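+ /* info->irq is the DMAO_DESCR bit, so << 2 selects DMAI_DESCR and << 3
+ * selects DMAI_EOP (see the MASK2 layout comment above) */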
  5322. + if (ireg & ((info->irq << 2) | (info->irq << 3))) {
  5323. + handled = 1;
  5324. + /* we have received something */
  5325. + receive_chars_dma(info);
  5326. + }
  5327. +
  5328. + /* FIXME: here we should really check for a change in the
  5329. + status lines and if so call status_handle(info) */
  5330. + }
  5331. + return IRQ_RETVAL(handled);
  5332. +} /* rec_interrupt */
  5333. +
  5334. +static int force_eop_if_needed(struct e100_serial *info)
  5335. +{
  5336. + /* We check data_avail bit to determine if data has
  5337. + * arrived since last time
  5338. + */
  5339. + unsigned char rstat = info->ioport[REG_STATUS];
  5340. +
  5341. + /* error or datavail? */
  5342. + if (rstat & SER_ERROR_MASK) {
  5343. + /* Some error has occurred. If there has been valid data, an
  5344. + * EOP interrupt will be generated automatically. If there is no data, the
  5345. + * normal ser_interrupt should be enabled and will handle it.
  5346. + * So do nothing!
  5347. + */
  5348. + DEBUG_LOG(info->line, "timeout err: rstat 0x%03X\n",
  5349. + rstat | (info->line << 8));
  5350. + return 0;
  5351. + }
  5352. +
  5353. + if (rstat & SER_DATA_AVAIL_MASK) {
  5354. + /* Ok data, no error, count it */
  5355. + TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n",
  5356. + rstat | (info->line << 8)));
  5357. + /* Read data to clear status flags */
  5358. + (void)info->ioport[REG_DATA];
  5359. +
  5360. + info->forced_eop = 0;
  5361. + START_FLUSH_FAST_TIMER(info, "magic");
  5362. + return 0;
  5363. + }
  5364. +
  5365. + /* hit the timeout, force an EOP for the input
  5366. + * dma channel if we haven't already
  5367. + */
  5368. + if (!info->forced_eop) {
  5369. + info->forced_eop = 1;
  5370. + PROCSTAT(ser_stat[info->line].timeout_flush_cnt++);
  5371. + TIMERD(DEBUG_LOG(info->line, "timeout EOP %i\n", info->line));
  5372. + FORCE_EOP(info);
  5373. + }
  5374. +
  5375. + return 1;
  5376. +}
  5377. +
  5378. +static void flush_to_flip_buffer(struct e100_serial *info)
  5379. +{
  5380. + struct tty_struct *tty;
  5381. + struct etrax_recv_buffer *buffer;
  5382. + unsigned long flags;
  5383. +
  5384. + local_irq_save(flags);
  5385. + tty = info->port.tty;
  5386. +
  5387. + if (!tty) {
  5388. + local_irq_restore(flags);
  5389. + return;
  5390. + }
  5391. +
  5392. + while ((buffer = info->first_recv_buffer) != NULL) {
  5393. + unsigned int count = buffer->length;
  5394. +
  5395. + tty_insert_flip_string(tty, buffer->buffer, count);
  5396. + info->recv_cnt -= count;
  5397. +
  5398. + if (count == buffer->length) {
  5399. + info->first_recv_buffer = buffer->next;
  5400. + kfree(buffer);
  5401. + } else {
  5402. + buffer->length -= count;
  5403. + memmove(buffer->buffer, buffer->buffer + count, buffer->length);
  5404. + buffer->error = TTY_NORMAL;
  5405. + }
  5406. + }
  5407. +
  5408. + if (!info->first_recv_buffer)
  5409. + info->last_recv_buffer = NULL;
  5410. +
  5411. + local_irq_restore(flags);
  5412. +
  5413. + /* This includes a check for low-latency */
  5414. + tty_flip_buffer_push(tty);
  5415. +}
  5416. +
  5417. +static void check_flush_timeout(struct e100_serial *info)
  5418. +{
  5419. + /* Flip what we've got (if we can) */
  5420. + flush_to_flip_buffer(info);
  5421. +
  5422. + /* We might need to flip later, but not too fast
  5423. + * since the system is busy processing input... */
  5424. + if (info->first_recv_buffer)
  5425. + START_FLUSH_FAST_TIMER_TIME(info, "flip", 2000);
  5426. +
  5427. + /* Force eop last, since data might have come while we're processing
  5428. + * and if we started the slow timer above, we won't start a fast one
  5429. + * below.
  5430. + */
  5431. + force_eop_if_needed(info);
  5432. +}
  5433. +
  5434. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  5435. +static void flush_timeout_function(unsigned long data)
  5436. +{
  5437. + struct e100_serial *info = (struct e100_serial *)data;
  5438. +
  5439. + fast_timers[info->line].function = NULL;
  5440. + serial_fast_timer_expired++;
  5441. + TIMERD(DEBUG_LOG(info->line, "flush_timout %i ", info->line));
  5442. + TIMERD(DEBUG_LOG(info->line, "num expired: %i\n", serial_fast_timer_expired));
  5443. + check_flush_timeout(info);
  5444. +}
  5445. +
  5446. +#else
  5447. +
  5448. +/* dma fifo/buffer timeout handler
  5449. + forces an end-of-packet for the dma input channel if no chars
  5450. + have been received for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS/100 s.
  5451. +*/
  5452. +
  5453. +static struct timer_list flush_timer;
  5454. +
  5455. +static void
  5456. +timed_flush_handler(unsigned long ptr)
  5457. +{
  5458. + struct e100_serial *info;
  5459. + int i;
  5460. +
  5461. +#ifdef CONFIG_SVINTO_SIM
  5462. + return;
  5463. +#endif
  5464. +
  5465. + for (i = 0; i < NR_PORTS; i++) {
  5466. + info = rs_table + i;
  5467. + if (info->uses_dma_in)
  5468. + check_flush_timeout(info);
  5469. + }
  5470. +
  5471. + /* restart flush timer */
  5472. + mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
  5473. +}
  5474. +#endif
  5475. +
  5476. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  5477. +
  5478. +/* If there is an error (i.e. a break) when the DMA is running and
  5479. + * there are no bytes in the fifo, the DMA is stopped and we get no
  5480. + * eop interrupt. Thus we have to monitor the first bytes of a DMA
  5481. + * transfer, and if they are without error we can turn the serial
  5482. + * interrupts off.
  5483. + */
  5484. +
  5485. +/*
  5486. +BREAK handling on ETRAX 100:
  5487. +ETRAX will generate an interrupt even though there is no stop bit between the
  5488. +characters.
  5489. +
  5490. +Depending on how long the break sequence is, the end of the break sequence
  5491. +will look different:
  5492. +| indicates start/end of a character.
  5493. +
  5494. +B= Break character (0x00) with framing error.
  5495. +E= Error byte with parity error received after B characters.
  5496. +F= "Faked" valid byte received immediately after B characters.
  5497. +V= Valid byte
  5498. +
  5499. +1.
  5500. + B BL ___________________________ V
  5501. +.._|__________|__________| |valid data |
  5502. +
  5503. +Multiple frame errors with data == 0x00 (B),
  5504. +the timing matches up "perfectly" so no extra ending char is detected.
  5505. +If the RXD pin is 1 in the last interrupt, we set
  5506. +info->errorcode = ERRCODE_INSERT_BREAK, but we can't really
  5507. +know if another byte will come and this really is case 2. below
  5508. +(e.g. F=0xFF or 0xFE).
  5509. +If the RXD pin is 0 we can expect another character (see 2. below).
  5510. +
  5511. +
  5512. +2.
  5513. +
  5514. + B B E or F__________________..__ V
  5515. +.._|__________|__________|______ | |valid data
  5516. + "valid" or
  5517. + parity error
  5518. +
  5519. +Multiple frame errors with data == 0x00 (B),
  5520. +but part of the break is interpreted as a start bit (and possibly
  5521. +some 0 bits followed by a number of 1 bits and a stop bit).
  5522. +Depending on parity settings etc. this last character can be either
  5523. +a fake "valid" char (F) or have a parity error (E).
  5524. +
  5525. +If the character is valid it will be put in the buffer,
  5526. +we set info->errorcode = ERRCODE_SET_BREAK so the receive interrupt
  5527. +will set the flags so the tty will handle it,
  5528. +if it's an error byte it will not be put in the buffer
  5529. +and we set info->errorcode = ERRCODE_INSERT_BREAK.
  5530. +
  5531. +To distinguish a V byte in 1. from an F byte in 2. we keep a timestamp
  5532. +of the last faulty char (B) and compare it with the current time:
  5533. +if the elapsed time is less than 2*char_time_usec we will assume
  5534. +it's a faked F char and not a valid char, and set
  5535. +info->errorcode = ERRCODE_SET_BREAK.
  5536. +
  5537. +Flaws in the above solution:
  5538. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  5539. +We use the timer to distinguish an F character from a V character;
  5540. +if a V character comes too close after the break we might make the wrong decision.
  5541. +
  5542. +TODO: The break will be delayed until an F or V character is received.
  5543. +
  5544. +*/
  5545. +
  5546. +static
  5547. +struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
  5548. +{
  5549. + unsigned long data_read;
  5550. + struct tty_struct *tty = info->port.tty;
  5551. +
  5552. + if (!tty) {
  5553. + printk("!NO TTY!\n");
  5554. + return info;
  5555. + }
  5556. +
  5557. + /* Read data and status at the same time */
  5558. + data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
  5559. +more_data:
  5560. + if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) {
  5561. + DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
  5562. + }
  5563. + DINTR2(DEBUG_LOG(info->line, "ser_rx %c\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read)));
  5564. +
  5565. + if (data_read & ( IO_MASK(R_SERIAL0_READ, framing_err) |
  5566. + IO_MASK(R_SERIAL0_READ, par_err) |
  5567. + IO_MASK(R_SERIAL0_READ, overrun) )) {
  5568. + /* An error */
  5569. + info->last_rx_active_usec = GET_JIFFIES_USEC();
  5570. + info->last_rx_active = jiffies;
  5571. + DINTR1(DEBUG_LOG(info->line, "ser_rx err stat_data %04X\n", data_read));
  5572. + DLOG_INT_TRIG(
  5573. + if (!log_int_trig1_pos) {
  5574. + log_int_trig1_pos = log_int_pos;
  5575. + log_int(rdpc(), 0, 0);
  5576. + }
  5577. + );
  5578. +
  5579. +
  5580. + if ( ((data_read & IO_MASK(R_SERIAL0_READ, data_in)) == 0) &&
  5581. + (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) ) {
  5582. + /* Most likely a break, but we get interrupts over and
  5583. + * over again.
  5584. + */
  5585. +
  5586. + if (!info->break_detected_cnt) {
  5587. + DEBUG_LOG(info->line, "#BRK start\n", 0);
  5588. + }
  5589. + if (data_read & IO_MASK(R_SERIAL0_READ, rxd)) {
  5590. + /* The RX pin is high now, so the break
  5591. + * must be over, but....
  5592. + * we can't really know if we will get another
  5593. + * last byte ending the break or not.
  5594. + * And we don't know if the byte (if any) will
  5595. + * have an error or look valid.
  5596. + */
  5597. + DEBUG_LOG(info->line, "# BL BRK\n", 0);
  5598. + info->errorcode = ERRCODE_INSERT_BREAK;
  5599. + }
  5600. + info->break_detected_cnt++;
  5601. + } else {
  5602. + /* The error does not look like a break, but could be
  5603. + * the end of one
  5604. + */
  5605. + if (info->break_detected_cnt) {
  5606. + DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
  5607. + info->errorcode = ERRCODE_INSERT_BREAK;
  5608. + } else {
  5609. + unsigned char data = IO_EXTRACT(R_SERIAL0_READ,
  5610. + data_in, data_read);
  5611. + char flag = TTY_NORMAL;
  5612. + if (info->errorcode == ERRCODE_INSERT_BREAK) {
  5613. + struct tty_struct *tty = info->port.tty;
  5614. + tty_insert_flip_char(tty, 0, flag);
  5615. + info->icount.rx++;
  5616. + }
  5617. +
  5618. + if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
  5619. + info->icount.parity++;
  5620. + flag = TTY_PARITY;
  5621. + } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
  5622. + info->icount.overrun++;
  5623. + flag = TTY_OVERRUN;
  5624. + } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
  5625. + info->icount.frame++;
  5626. + flag = TTY_FRAME;
  5627. + }
  5628. + tty_insert_flip_char(tty, data, flag);
  5629. + info->errorcode = 0;
  5630. + }
  5631. + info->break_detected_cnt = 0;
  5632. + }
  5633. + } else if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
  5634. + /* No error */
  5635. + DLOG_INT_TRIG(
  5636. + if (!log_int_trig1_pos) {
  5637. + if (log_int_pos >= log_int_size) {
  5638. + log_int_pos = 0;
  5639. + }
  5640. + log_int_trig0_pos = log_int_pos;
  5641. + log_int(rdpc(), 0, 0);
  5642. + }
  5643. + );
  5644. + tty_insert_flip_char(tty,
  5645. + IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
  5646. + TTY_NORMAL);
  5647. + } else {
  5648. + DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
  5649. + }
  5650. +
  5651. +
  5652. + info->icount.rx++;
  5653. + data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
  5654. + if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
  5655. + DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read));
  5656. + goto more_data;
  5657. + }
  5658. +
  5659. + tty_flip_buffer_push(info->port.tty);
  5660. + return info;
  5661. +}
  5662. +
  5663. +static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
  5664. +{
  5665. + unsigned char rstat;
  5666. +
  5667. +#ifdef SERIAL_DEBUG_INTR
  5668. + printk("Interrupt from serport %d\n", i);
  5669. +#endif
  5670. +/* DEBUG_LOG(info->line, "ser_interrupt stat %03X\n", rstat | (i << 8)); */
  5671. + if (!info->uses_dma_in) {
  5672. + return handle_ser_rx_interrupt_no_dma(info);
  5673. + }
  5674. + /* DMA is used */
  5675. + rstat = info->ioport[REG_STATUS];
  5676. + if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
  5677. + DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
  5678. + }
  5679. +
  5680. + if (rstat & SER_ERROR_MASK) {
  5681. + unsigned char data;
  5682. +
  5683. + info->last_rx_active_usec = GET_JIFFIES_USEC();
  5684. + info->last_rx_active = jiffies;
  5685. + /* If we got an error, we must reset it by reading the
  5686. + * data_in field
  5687. + */
  5688. + data = info->ioport[REG_DATA];
  5689. + DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data));
  5690. + DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat));
  5691. + if (!data && (rstat & SER_FRAMING_ERR_MASK)) {
  5692. + /* Most likely a break, but we get interrupts over and
  5693. + * over again.
  5694. + */
  5695. +
  5696. + if (!info->break_detected_cnt) {
  5697. + DEBUG_LOG(info->line, "#BRK start\n", 0);
  5698. + }
  5699. + if (rstat & SER_RXD_MASK) {
  5700. + /* The RX pin is high now, so the break
  5701. + * must be over, but....
  5702. + * we can't really know if we will get another
  5703. + * last byte ending the break or not.
  5704. + * And we don't know if the byte (if any) will
  5705. + * have an error or look valid.
  5706. + */
  5707. + DEBUG_LOG(info->line, "# BL BRK\n", 0);
  5708. + info->errorcode = ERRCODE_INSERT_BREAK;
  5709. + }
  5710. + info->break_detected_cnt++;
  5711. + } else {
  5712. + /* The error does not look like a break, but could be
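+ /* Mark the one-shot timer slot as free again before toggling RTS */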
  5713. + * the end of one
  5714. + */
  5715. + if (info->break_detected_cnt) {
  5716. + DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
  5717. + info->errorcode = ERRCODE_INSERT_BREAK;
  5718. + } else {
  5719. + if (info->errorcode == ERRCODE_INSERT_BREAK) {
  5720. + info->icount.brk++;
  5721. + add_char_and_flag(info, '\0', TTY_BREAK);
  5722. + }
  5723. +
  5724. + if (rstat & SER_PAR_ERR_MASK) {
  5725. + info->icount.parity++;
  5726. + add_char_and_flag(info, data, TTY_PARITY);
  5727. + } else if (rstat & SER_OVERRUN_MASK) {
  5728. + info->icount.overrun++;
  5729. + add_char_and_flag(info, data, TTY_OVERRUN);
  5730. + } else if (rstat & SER_FRAMING_ERR_MASK) {
  5731. + info->icount.frame++;
  5732. + add_char_and_flag(info, data, TTY_FRAME);
  5733. + }
  5734. +
  5735. + info->errorcode = 0;
  5736. + }
  5737. + info->break_detected_cnt = 0;
  5738. + DEBUG_LOG(info->line, "#iERR s d %04X\n",
  5739. + ((rstat & SER_ERROR_MASK) << 8) | data);
  5740. + }
  5741. + PROCSTAT(ser_stat[info->line].early_errors_cnt++);
  5742. + } else { /* It was a valid byte, now let the DMA do the rest */
  5743. + unsigned long curr_time_u = GET_JIFFIES_USEC();
  5744. + unsigned long curr_time = jiffies;
  5745. +
  5746. + if (info->break_detected_cnt) {
  5747. + /* Detect if this character is a new valid char or the
  5748. + * last char in a break sequence: If LSBits are 0 and
  5749. + * MSBits are high AND the time is close to the
  5750. + * previous interrupt we should discard it.
  5751. + */
  5752. + long elapsed_usec =
  5753. + (curr_time - info->last_rx_active) * (1000000/HZ) +
  5754. + curr_time_u - info->last_rx_active_usec;
  5755. + if (elapsed_usec < 2*info->char_time_usec) {
  5756. + DEBUG_LOG(info->line, "FBRK %i\n", info->line);
  5757. + /* Report as BREAK (error) and let
  5758. + * receive_chars_dma() handle it
  5759. + */
  5760. + info->errorcode = ERRCODE_SET_BREAK;
  5761. + } else {
  5762. + DEBUG_LOG(info->line, "Not end of BRK (V)%i\n", info->line);
  5763. + }
  5764. + DEBUG_LOG(info->line, "num brk %i\n", info->break_detected_cnt);
  5765. + }
  5766. +
  5767. +#ifdef SERIAL_DEBUG_INTR
  5768. + printk("** OK, disabling ser_interrupts\n");
  5769. +#endif
  5770. + e100_disable_serial_data_irq(info);
  5771. + DINTR2(DEBUG_LOG(info->line, "ser_rx OK %d\n", info->line));
  5772. + info->break_detected_cnt = 0;
  5773. +
  5774. + PROCSTAT(ser_stat[info->line].ser_ints_ok_cnt++);
  5775. + }
  5776. + /* Restarting the DMA never hurts */
  5777. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
  5778. + START_FLUSH_FAST_TIMER(info, "ser_int");
  5779. + return info;
  5780. +} /* handle_ser_rx_interrupt */
  5781. +
  5782. +static void handle_ser_tx_interrupt(struct e100_serial *info)
  5783. +{
  5784. + unsigned long flags;
  5785. +
  5786. + if (info->x_char) {
  5787. + unsigned char rstat;
  5788. + DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
  5789. + local_irq_save(flags);
  5790. + rstat = info->ioport[REG_STATUS];
  5791. + DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
  5792. +
  5793. + info->ioport[REG_TR_DATA] = info->x_char;
  5794. + info->icount.tx++;
  5795. + info->x_char = 0;
  5796. + /* We must enable since it is disabled in ser_interrupt */
  5797. + e100_enable_serial_tx_ready_irq(info);
  5798. + local_irq_restore(flags);
  5799. + return;
  5800. + }
  5801. + if (info->uses_dma_out) {
  5802. + unsigned char rstat;
  5803. + int i;
  5804. + /* We only use normal tx interrupt when sending x_char */
  5805. + DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
  5806. + local_irq_save(flags);
  5807. + rstat = info->ioport[REG_STATUS];
  5808. + DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
  5809. + e100_disable_serial_tx_ready_irq(info);
  5810. + if (info->port.tty->stopped)
  5811. + rs_stop(info->port.tty);
  5812. + /* Enable the DMA channel and tell it to continue */
  5813. + e100_enable_txdma_channel(info);
  5814. + /* Wait 12 cycles before doing the DMA command */
  5815. + for(i = 6; i > 0; i--)
  5816. + nop();
  5817. +
  5818. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
  5819. + local_irq_restore(flags);
  5820. + return;
  5821. + }
  5822. + /* Normal char-by-char interrupt */
  5823. + if (info->xmit.head == info->xmit.tail
  5824. + || info->port.tty->stopped
  5825. + || info->port.tty->hw_stopped) {
  5826. + DFLOW(DEBUG_LOG(info->line, "tx_int: stopped %i\n",
  5827. + info->port.tty->stopped));
  5828. + e100_disable_serial_tx_ready_irq(info);
  5829. + info->tr_running = 0;
  5830. + return;
  5831. + }
  5832. + DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
  5833. + /* Send a byte; RS-485 timing is critical, so turn off interrupts */
  5834. + local_irq_save(flags);
  5835. + info->ioport[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
  5836. + info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
  5837. + info->icount.tx++;
  5838. + if (info->xmit.head == info->xmit.tail) {
  5839. +#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
  5840. + if (info->rs485.flags & SER_RS485_ENABLED) {
  5841. + /* Set a short timer to toggle RTS */
  5842. + start_one_shot_timer(&fast_timers_rs485[info->line],
  5843. + rs485_toggle_rts_timer_function,
  5844. + (unsigned long)info,
  5845. + info->char_time_usec*2,
  5846. + "RS-485");
  5847. + }
  5848. +#endif /* RS485 */
  5849. + info->last_tx_active_usec = GET_JIFFIES_USEC();
  5850. + info->last_tx_active = jiffies;
  5851. + e100_disable_serial_tx_ready_irq(info);
  5852. + info->tr_running = 0;
  5853. + DFLOW(DEBUG_LOG(info->line, "tx_int: stop2\n", 0));
  5854. + } else {
  5855. + /* We must enable since it is disabled in ser_interrupt */
  5856. + e100_enable_serial_tx_ready_irq(info);
  5857. + }
  5858. + local_irq_restore(flags);
  5859. +
  5860. + if (CIRC_CNT(info->xmit.head,
  5861. + info->xmit.tail,
  5862. + SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
  5863. + rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
  5864. +
  5865. +} /* handle_ser_tx_interrupt */
  5866. +
  5867. +/* result of time measurements:
  5868. + * RX duration 54-60 us when doing something, otherwise 6-9 us
  5869. + * ser_int duration: just sending: 8-15 us normally, up to 73 us
  5870. + */
  5871. +static irqreturn_t
  5872. +ser_interrupt(int irq, void *dev_id)
  5873. +{
  5874. + static volatile int tx_started = 0;
  5875. + struct e100_serial *info;
  5876. + int i;
  5877. + unsigned long flags;
  5878. + unsigned long irq_mask1_rd;
  5879. + unsigned long data_mask = (1 << (8+2*0)); /* ser0 data_avail */
  5880. + int handled = 0;
  5881. + static volatile unsigned long reentered_ready_mask = 0;
  5882. +
  5883. + local_irq_save(flags);
  5884. + irq_mask1_rd = *R_IRQ_MASK1_RD;
  5885. + /* First handle all rx interrupts with ints disabled */
  5886. + info = rs_table;
  5887. + irq_mask1_rd &= e100_ser_int_mask;
  5888. + for (i = 0; i < NR_PORTS; i++) {
  5889. + /* Which line caused the data irq? */
  5890. + if (irq_mask1_rd & data_mask) {
  5891. + handled = 1;
  5892. + handle_ser_rx_interrupt(info);
  5893. + }
  5894. + info += 1;
  5895. + data_mask <<= 2;
  5896. + }
  5897. + /* Handle tx interrupts with interrupts enabled so we
  5898. + * can take care of new data interrupts while transmitting
  5899. + * We protect the tx part with the tx_started flag.
  5900. + * We disable the tr_ready interrupts we are about to handle and
  5901. + * unblock the serial interrupt so new serial interrupts may come.
  5902. + *
  5903. + * If we get a new interrupt:
  5904. + * - it might be due to synchronous serial ports.
  5905. + * - serial irq will be blocked by general irq handler.
  5906. + * - async data will be handled above (sync will be ignored).
  5907. + * - tx_started flag will prevent us from trying to send again and
  5908. + * we will exit fast - no need to unblock serial irq.
  5909. + * - The next (sync) serial interrupt handler will be run with
  5910. + * interrupts disabled due to local_irq_restore() at the end of this function,
  5911. + * so the sync handler will not be preempted or reentered.
  5912. + */
  5913. + if (!tx_started) {
  5914. + unsigned long ready_mask;
  5916. + tx_started = 1;
  5917. + /* Only the tr_ready interrupts left */
  5918. + irq_mask1_rd &= (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
  5919. + IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
  5920. + IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
  5921. + IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
  5922. + while (irq_mask1_rd) {
  5923. + /* Disable those we are about to handle */
  5924. + *R_IRQ_MASK1_CLR = irq_mask1_rd;
  5925. + /* Unblock the serial interrupt */
  5926. + *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
  5927. +
  5928. + local_irq_enable();
  5929. + ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
  5930. + info = rs_table;
  5931. + for (i = 0; i < NR_PORTS; i++) {
  5932. + /* Which line caused the ready irq? */
  5933. + if (irq_mask1_rd & ready_mask) {
  5934. + handled = 1;
  5935. + handle_ser_tx_interrupt(info);
  5936. + }
  5937. + info += 1;
  5938. + ready_mask <<= 2;
  5939. + }
  5940. + /* handle_ser_tx_interrupt enables tr_ready interrupts */
  5941. + local_irq_disable();
  5942. + /* Handle reentered TX interrupt */
  5943. + irq_mask1_rd = reentered_ready_mask;
  5944. + }
  5945. + local_irq_disable();
  5946. + tx_started = 0;
  5947. + } else {
  5948. + unsigned long ready_mask;
  5949. + ready_mask = irq_mask1_rd & (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
  5950. + IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
  5951. + IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
  5952. + IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
  5953. + if (ready_mask) {
  5954. + reentered_ready_mask |= ready_mask;
  5955. + /* Disable those we are about to handle */
  5956. + *R_IRQ_MASK1_CLR = ready_mask;
  5957. + DFLOW(DEBUG_LOG(SERIAL_DEBUG_LINE, "ser_int reentered with TX %X\n", ready_mask));
  5958. + }
  5959. + }
  5960. +
  5961. + local_irq_restore(flags);
  5962. + return IRQ_RETVAL(handled);
  5963. +} /* ser_interrupt */
  5964. +#endif
  5965. +
  5966. +/*
  5967. + * -------------------------------------------------------------------
  5968. + * Here ends the serial interrupt routines.
  5969. + * -------------------------------------------------------------------
  5970. + */
  5971. +
  5972. +/*
  5973. + * This routine is used to handle the "bottom half" processing for the
  5974. + * serial driver, known also the "software interrupt" processing.
  5975. + * This processing is done at the kernel interrupt level, after the
  5976. + * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
  5977. + * is where time-consuming activities which can not be done in the
  5978. + * interrupt driver proper are done; the interrupt driver schedules
  5979. + * them using rs_sched_event(), and they get done here.
  5980. + */
  5981. +static void
  5982. +do_softint(struct work_struct *work)
  5983. +{
  5984. + struct e100_serial *info;
  5985. + struct tty_struct *tty;
  5986. +
  5987. + info = container_of(work, struct e100_serial, work);
  5988. +
  5989. + tty = info->port.tty;
  5990. + if (!tty)
  5991. + return;
  5992. +
  5993. + if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
  5994. + tty_wakeup(tty);
  5995. +}
  5996. +
  5997. +static int
  5998. +startup(struct e100_serial * info)
  5999. +{
  6000. + unsigned long flags;
  6001. + unsigned long xmit_page;
  6002. + int i;
  6003. +
  6004. + xmit_page = get_zeroed_page(GFP_KERNEL);
  6005. + if (!xmit_page)
  6006. + return -ENOMEM;
  6007. +
  6008. + local_irq_save(flags);
  6009. +
  6010. + /* if it was already initialized, skip this */
  6011. +
  6012. + if (info->flags & ASYNC_INITIALIZED) {
  6013. + local_irq_restore(flags);
  6014. + free_page(xmit_page);
  6015. + return 0;
  6016. + }
  6017. +
  6018. + if (info->xmit.buf)
  6019. + free_page(xmit_page);
  6020. + else
  6021. + info->xmit.buf = (unsigned char *) xmit_page;
  6022. +
  6023. +#ifdef SERIAL_DEBUG_OPEN
  6024. + printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
  6025. +#endif
  6026. +
  6027. +#ifdef CONFIG_SVINTO_SIM
  6028. + /* Bits and pieces collected from below. Better to have them
  6029. + in one ifdef:ed clause than to mix in a lot of ifdefs,
  6030. + right? */
  6031. + if (info->port.tty)
  6032. + clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
  6033. +
  6034. + info->xmit.head = info->xmit.tail = 0;
  6035. + info->first_recv_buffer = info->last_recv_buffer = NULL;
  6036. + info->recv_cnt = info->max_recv_cnt = 0;
  6037. +
  6038. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
  6039. + info->rec_descr[i].buf = NULL;
  6040. +
  6041. + /* No real action in the simulator, but this may set info fields
  6042. + that are important to ioctl. */
  6043. + change_speed(info);
  6044. +#else
  6045. +
  6046. + /*
  6047. + * Clear the FIFO buffers and disable them
  6048. + * (they will be reenabled in change_speed())
  6049. + */
  6050. +
  6051. + /*
  6052. + * Reset the DMA channels and make sure their interrupts are cleared
  6053. + */
  6054. +
  6055. + if (info->dma_in_enabled) {
  6056. + info->uses_dma_in = 1;
  6057. + e100_enable_rxdma_channel(info);
  6058. +
  6059. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  6060. +
  6061. + /* Wait until reset cycle is complete */
  6062. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
  6063. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  6064. +
  6065. + /* Make sure the irqs are cleared */
  6066. + *info->iclrintradr =
  6067. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  6068. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  6069. + } else {
  6070. + e100_disable_rxdma_channel(info);
  6071. + }
  6072. +
  6073. + if (info->dma_out_enabled) {
  6074. + info->uses_dma_out = 1;
  6075. + e100_enable_txdma_channel(info);
  6076. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  6077. +
  6078. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) ==
  6079. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  6080. +
  6081. + /* Make sure the irqs are cleared */
  6082. + *info->oclrintradr =
  6083. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  6084. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  6085. + } else {
  6086. + e100_disable_txdma_channel(info);
  6087. + }
  6088. +
  6089. + if (info->port.tty)
  6090. + clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
  6091. +
  6092. + info->xmit.head = info->xmit.tail = 0;
  6093. + info->first_recv_buffer = info->last_recv_buffer = NULL;
  6094. + info->recv_cnt = info->max_recv_cnt = 0;
  6095. +
  6096. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
  6097. + info->rec_descr[i].buf = 0;
  6098. +
  6099. + /*
  6100. + * and set the speed and other flags of the serial port
  6101. + * this will start the rx/tx as well
  6102. + */
  6103. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  6104. + e100_enable_serial_data_irq(info);
  6105. +#endif
  6106. + change_speed(info);
  6107. +
  6108. + /* dummy read to reset any serial errors */
  6109. +
  6110. + (void)info->ioport[REG_DATA];
  6111. +
  6112. + /* enable the interrupts */
  6113. + if (info->uses_dma_out)
  6114. + e100_enable_txdma_irq(info);
  6115. +
  6116. + e100_enable_rx_irq(info);
  6117. +
  6118. + info->tr_running = 0; /* to be sure we don't lock up the transmitter */
  6119. +
  6120. + /* setup the dma input descriptor and start dma */
  6121. +
  6122. + start_receive(info);
  6123. +
  6124. + /* for safety, make sure the descriptor's last result is 0 bytes written */
  6125. +
  6126. + info->tr_descr.sw_len = 0;
  6127. + info->tr_descr.hw_len = 0;
  6128. + info->tr_descr.status = 0;
  6129. +
  6130. + /* enable RTS/DTR last */
  6131. +
  6132. + e100_rts(info, 1);
  6133. + e100_dtr(info, 1);
  6134. +
  6135. +#endif /* CONFIG_SVINTO_SIM */
  6136. +
  6137. + info->flags |= ASYNC_INITIALIZED;
  6138. +
  6139. + local_irq_restore(flags);
  6140. + return 0;
  6141. +}
  6142. +
  6143. +/*
  6144. + * This routine will shutdown a serial port; interrupts are disabled, and
  6145. + * DTR is dropped if the hangup on close termio flag is on.
  6146. + */
  6147. +static void
  6148. +shutdown(struct e100_serial * info)
  6149. +{
  6150. + unsigned long flags;
  6151. + struct etrax_dma_descr *descr = info->rec_descr;
  6152. + struct etrax_recv_buffer *buffer;
  6153. + int i;
  6154. +
  6155. +#ifndef CONFIG_SVINTO_SIM
  6156. + /* shut down the transmitter and receiver */
  6157. + DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
  6158. + e100_disable_rx(info);
  6159. + info->ioport[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40);
  6160. +
  6161. + /* disable interrupts, reset dma channels */
  6162. + if (info->uses_dma_in) {
  6163. + e100_disable_rxdma_irq(info);
  6164. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  6165. + info->uses_dma_in = 0;
  6166. + } else {
  6167. + e100_disable_serial_data_irq(info);
  6168. + }
  6169. +
  6170. + if (info->uses_dma_out) {
  6171. + e100_disable_txdma_irq(info);
  6172. + info->tr_running = 0;
  6173. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  6174. + info->uses_dma_out = 0;
  6175. + } else {
  6176. + e100_disable_serial_tx_ready_irq(info);
  6177. + info->tr_running = 0;
  6178. + }
  6179. +
  6180. +#endif /* CONFIG_SVINTO_SIM */
  6181. +
  6182. + if (!(info->flags & ASYNC_INITIALIZED))
  6183. + return;
  6184. +
  6185. +#ifdef SERIAL_DEBUG_OPEN
  6186. + printk("Shutting down serial port %d (irq %d)....\n", info->line,
  6187. + info->irq);
  6188. +#endif
  6189. +
  6190. + local_irq_save(flags);
  6191. +
  6192. + if (info->xmit.buf) {
  6193. + free_page((unsigned long)info->xmit.buf);
  6194. + info->xmit.buf = NULL;
  6195. + }
  6196. +
  6197. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
  6198. + if (descr[i].buf) {
  6199. + buffer = phys_to_virt(descr[i].buf) - sizeof *buffer;
  6200. + kfree(buffer);
  6201. + descr[i].buf = 0;
  6202. + }
  6203. +
  6204. + if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
  6205. + /* hang up DTR and RTS if HUPCL is enabled */
  6206. + e100_dtr(info, 0);
  6207. + e100_rts(info, 0); /* could check CRTSCTS before doing this */
  6208. + }
  6209. +
  6210. + if (info->port.tty)
  6211. + set_bit(TTY_IO_ERROR, &info->port.tty->flags);
  6212. +
  6213. + info->flags &= ~ASYNC_INITIALIZED;
  6214. + local_irq_restore(flags);
  6215. +}
  6216. +
  6217. +
  6218. +/* change baud rate and other assorted parameters */
  6219. +
  6220. +static void
  6221. +change_speed(struct e100_serial *info)
  6222. +{
  6223. + unsigned int cflag;
  6224. + unsigned long xoff;
  6225. + unsigned long flags;
  6226. + /* first some safety checks */
  6227. +
  6228. + if (!info->port.tty || !info->port.tty->termios)
  6229. + return;
  6230. + if (!info->ioport)
  6231. + return;
  6232. +
  6233. + cflag = info->port.tty->termios->c_cflag;
  6234. +
  6235. + /* possibly, the tx/rx should be disabled first to do this safely */
  6236. +
  6237. + /* change baud-rate and write it to the hardware */
  6238. + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) {
  6239. + /* Special baudrate */
  6240. + u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
  6241. + unsigned long alt_source =
  6242. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
  6243. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
  6244. + /* R_ALT_SER_BAUDRATE selects the source */
  6245. + DBAUD(printk("Custom baudrate: baud_base/divisor %lu/%i\n",
  6246. + (unsigned long)info->baud_base, info->custom_divisor));
  6247. + if (info->baud_base == SERIAL_PRESCALE_BASE) {
  6248. + /* 0, 2-65535 (0=65536) */
  6249. + u16 divisor = info->custom_divisor;
  6250. + /* R_SERIAL_PRESCALE (upper 16 bits of R_CLOCK_PRESCALE) */
  6251. + /* baudrate is 3.125MHz/custom_divisor */
  6252. + alt_source =
  6253. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, prescale) |
  6254. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, prescale);
  6255. + alt_source = 0x11;
  6256. + DBAUD(printk("Writing SERIAL_PRESCALE: divisor %i\n", divisor));
  6257. + *R_SERIAL_PRESCALE = divisor;
  6258. + info->baud = SERIAL_PRESCALE_BASE/divisor;
  6259. + }
  6260. +#ifdef CONFIG_ETRAX_EXTERN_PB6CLK_ENABLED
  6261. + else if ((info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8 &&
  6262. + info->custom_divisor == 1) ||
  6263. + (info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ &&
  6264. + info->custom_divisor == 8)) {
  6265. + /* ext_clk selected */
  6266. + alt_source =
  6267. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, extern) |
  6268. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, extern);
  6269. + DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
  6270. + info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
  6271. + }
  6272. +#endif
  6273. + else
  6274. + {
  6275. + /* Bad baudbase, we don't support using timer0
  6276. + * for baudrate.
  6277. + */
  6278. + printk(KERN_WARNING "Bad baud_base/custom_divisor: %lu/%i\n",
  6279. + (unsigned long)info->baud_base, info->custom_divisor);
  6280. + }
  6281. + r_alt_ser_baudrate_shadow &= ~mask;
  6282. + r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
  6283. + *R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
  6284. + } else {
  6285. + /* Normal baudrate */
  6286. + /* Make sure we use normal baudrate */
  6287. + u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
  6288. + unsigned long alt_source =
  6289. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
  6290. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
  6291. + r_alt_ser_baudrate_shadow &= ~mask;
  6292. + r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
  6293. +#ifndef CONFIG_SVINTO_SIM
  6294. + *R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
  6295. +#endif /* CONFIG_SVINTO_SIM */
  6296. +
  6297. + info->baud = cflag_to_baud(cflag);
  6298. +#ifndef CONFIG_SVINTO_SIM
  6299. + info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
  6300. +#endif /* CONFIG_SVINTO_SIM */
  6301. + }
  6302. +
  6303. +#ifndef CONFIG_SVINTO_SIM
  6304. + /* start with default settings and then fill in changes */
  6305. + local_irq_save(flags);
  6306. + /* 8 bit, no/even parity */
  6307. + info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
  6308. + IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
  6309. + IO_MASK(R_SERIAL0_REC_CTRL, rec_par));
  6310. +
  6311. + /* 8 bit, no/even parity, 1 stop bit, no cts */
  6312. + info->tx_ctrl &= ~(IO_MASK(R_SERIAL0_TR_CTRL, tr_bitnr) |
  6313. + IO_MASK(R_SERIAL0_TR_CTRL, tr_par_en) |
  6314. + IO_MASK(R_SERIAL0_TR_CTRL, tr_par) |
  6315. + IO_MASK(R_SERIAL0_TR_CTRL, stop_bits) |
  6316. + IO_MASK(R_SERIAL0_TR_CTRL, auto_cts));
  6317. +
  6318. + if ((cflag & CSIZE) == CS7) {
  6319. + /* set 7 bit mode */
  6320. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
  6321. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
  6322. + }
  6323. +
  6324. + if (cflag & CSTOPB) {
  6325. + /* set 2 stop bit mode */
  6326. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, two_bits);
  6327. + }
  6328. +
  6329. + if (cflag & PARENB) {
  6330. + /* enable parity */
  6331. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
  6332. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
  6333. + }
  6334. +
  6335. + if (cflag & CMSPAR) {
6337. + /* enable stick parity; PARODD means Mark, which matches ETRAX */
  6337. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, stick);
  6338. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, stick);
  6339. + }
  6340. + if (cflag & PARODD) {
  6341. + /* set odd parity (or Mark if CMSPAR) */
  6342. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd);
  6343. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd);
  6344. + }
  6345. +
  6346. + if (cflag & CRTSCTS) {
  6347. + /* enable automatic CTS handling */
  6348. + DFLOW(DEBUG_LOG(info->line, "FLOW auto_cts enabled\n", 0));
  6349. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, active);
  6350. + }
  6351. +
  6352. + /* make sure the tx and rx are enabled */
  6353. +
  6354. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable);
  6355. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable);
  6356. +
  6357. + /* actually write the control regs to the hardware */
  6358. +
  6359. + info->ioport[REG_TR_CTRL] = info->tx_ctrl;
  6360. + info->ioport[REG_REC_CTRL] = info->rx_ctrl;
  6361. + xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty));
  6362. + xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
  6363. + if (info->port.tty->termios->c_iflag & IXON ) {
  6364. + DFLOW(DEBUG_LOG(info->line, "FLOW XOFF enabled 0x%02X\n",
  6365. + STOP_CHAR(info->port.tty)));
  6366. + xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
  6367. + }
  6368. +
  6369. + *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
  6370. + local_irq_restore(flags);
  6371. +#endif /* !CONFIG_SVINTO_SIM */
  6372. +
  6373. + update_char_time(info);
  6374. +
  6375. +} /* change_speed */
  6376. +
  6377. +/* start transmitting chars NOW */
  6378. +
  6379. +static void
  6380. +rs_flush_chars(struct tty_struct *tty)
  6381. +{
  6382. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6383. + unsigned long flags;
  6384. +
  6385. + if (info->tr_running ||
  6386. + info->xmit.head == info->xmit.tail ||
  6387. + tty->stopped ||
  6388. + tty->hw_stopped ||
  6389. + !info->xmit.buf)
  6390. + return;
  6391. +
  6392. +#ifdef SERIAL_DEBUG_FLOW
  6393. + printk("rs_flush_chars\n");
  6394. +#endif
  6395. +
  6396. + /* this protection might not exactly be necessary here */
  6397. +
  6398. + local_irq_save(flags);
  6399. + start_transmit(info);
  6400. + local_irq_restore(flags);
  6401. +}
  6402. +
  6403. +static int rs_raw_write(struct tty_struct *tty,
  6404. + const unsigned char *buf, int count)
  6405. +{
  6406. + int c, ret = 0;
  6407. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6408. + unsigned long flags;
  6409. +
  6410. + /* first some sanity checks */
  6411. +
  6412. + if (!tty || !info->xmit.buf || !tmp_buf)
  6413. + return 0;
  6414. +
  6415. +#ifdef SERIAL_DEBUG_DATA
  6416. + if (info->line == SERIAL_DEBUG_LINE)
  6417. + printk("rs_raw_write (%d), status %d\n",
  6418. + count, info->ioport[REG_STATUS]);
  6419. +#endif
  6420. +
  6421. +#ifdef CONFIG_SVINTO_SIM
  6422. + /* Really simple. The output is here and now. */
  6423. + SIMCOUT(buf, count);
  6424. + return count;
  6425. +#endif
  6426. + local_save_flags(flags);
  6427. + DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
  6428. + DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
  6429. +
  6430. +
  6431. + /* The local_irq_disable/restore_flags pairs below are needed
  6432. + * because the DMA interrupt handler moves the info->xmit values.
6433. + * The memcpy needs to be in the critical region, unfortunately,
  6434. + * because we need to read xmit values, memcpy, write xmit values
  6435. + * in one atomic operation... this could perhaps be avoided by
  6436. + * more clever design.
  6437. + */
  6438. + local_irq_disable();
  6439. + while (count) {
  6440. + c = CIRC_SPACE_TO_END(info->xmit.head,
  6441. + info->xmit.tail,
  6442. + SERIAL_XMIT_SIZE);
  6443. +
  6444. + if (count < c)
  6445. + c = count;
  6446. + if (c <= 0)
  6447. + break;
  6448. +
  6449. + memcpy(info->xmit.buf + info->xmit.head, buf, c);
  6450. + info->xmit.head = (info->xmit.head + c) &
  6451. + (SERIAL_XMIT_SIZE-1);
  6452. + buf += c;
  6453. + count -= c;
  6454. + ret += c;
  6455. + }
  6456. + local_irq_restore(flags);
  6457. +
6458. + /* Enable the transmitter if it is not running, unless the tty is stopped.
6459. + * This does not need IRQ protection since, if tr_running == 0,
6460. + * the IRQs are not running anyway for this port.
  6461. + */
  6462. + DFLOW(DEBUG_LOG(info->line, "write ret %i\n", ret));
  6463. +
  6464. + if (info->xmit.head != info->xmit.tail &&
  6465. + !tty->stopped &&
  6466. + !tty->hw_stopped &&
  6467. + !info->tr_running) {
  6468. + start_transmit(info);
  6469. + }
  6470. +
  6471. + return ret;
6472. +} /* rs_raw_write() */
  6473. +
  6474. +static int
  6475. +rs_write(struct tty_struct *tty,
  6476. + const unsigned char *buf, int count)
  6477. +{
  6478. +#if defined(CONFIG_ETRAX_RS485)
  6479. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6480. +
  6481. + if (info->rs485.flags & SER_RS485_ENABLED)
  6482. + {
  6483. + /* If we are in RS-485 mode, we need to toggle RTS and disable
  6484. + * the receiver before initiating a DMA transfer
  6485. + */
  6486. +#ifdef CONFIG_ETRAX_FAST_TIMER
  6487. + /* Abort any started timer */
  6488. + fast_timers_rs485[info->line].function = NULL;
  6489. + del_fast_timer(&fast_timers_rs485[info->line]);
  6490. +#endif
  6491. + e100_rts(info, (info->rs485.flags & SER_RS485_RTS_ON_SEND));
  6492. +#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
  6493. + e100_disable_rx(info);
  6494. + e100_enable_rx_irq(info);
  6495. +#endif
  6496. + if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
  6497. + (info->rs485.delay_rts_before_send > 0))
  6498. + msleep(info->rs485.delay_rts_before_send);
  6499. + }
  6500. +#endif /* CONFIG_ETRAX_RS485 */
  6501. +
  6502. + count = rs_raw_write(tty, buf, count);
  6503. +
  6504. +#if defined(CONFIG_ETRAX_RS485)
  6505. + if (info->rs485.flags & SER_RS485_ENABLED)
  6506. + {
  6507. + unsigned int val;
  6508. + /* If we are in RS-485 mode the following has to be done:
  6509. + * wait until DMA is ready
  6510. + * wait on transmit shift register
  6511. + * toggle RTS
  6512. + * enable the receiver
  6513. + */
  6514. +
  6515. + /* Sleep until all sent */
  6516. + tty_wait_until_sent(tty, 0);
  6517. +#ifdef CONFIG_ETRAX_FAST_TIMER
  6518. + /* Now sleep a little more so that shift register is empty */
  6519. + schedule_usleep(info->char_time_usec * 2);
  6520. +#endif
  6521. + /* wait on transmit shift register */
  6522. + do{
  6523. + get_lsr_info(info, &val);
  6524. + }while (!(val & TIOCSER_TEMT));
  6525. +
  6526. + e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
  6527. +
  6528. +#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
  6529. + e100_enable_rx(info);
  6530. + e100_enable_rxdma_irq(info);
  6531. +#endif
  6532. + }
  6533. +#endif /* CONFIG_ETRAX_RS485 */
  6534. +
  6535. + return count;
  6536. +} /* rs_write */
  6537. +
  6538. +
  6539. +/* how much space is available in the xmit buffer? */
  6540. +
  6541. +static int
  6542. +rs_write_room(struct tty_struct *tty)
  6543. +{
  6544. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6545. +
  6546. + return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  6547. +}
  6548. +
  6549. +/* How many chars are in the xmit buffer?
  6550. + * This does not include any chars in the transmitter FIFO.
  6551. + * Use wait_until_sent for waiting for FIFO drain.
  6552. + */
  6553. +
  6554. +static int
  6555. +rs_chars_in_buffer(struct tty_struct *tty)
  6556. +{
  6557. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6558. +
  6559. + return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  6560. +}
  6561. +
  6562. +/* discard everything in the xmit buffer */
  6563. +
  6564. +static void
  6565. +rs_flush_buffer(struct tty_struct *tty)
  6566. +{
  6567. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6568. + unsigned long flags;
  6569. +
  6570. + local_irq_save(flags);
  6571. + info->xmit.head = info->xmit.tail = 0;
  6572. + local_irq_restore(flags);
  6573. +
  6574. + tty_wakeup(tty);
  6575. +}
  6576. +
  6577. +/*
  6578. + * This function is used to send a high-priority XON/XOFF character to
  6579. + * the device
  6580. + *
  6581. + * Since we use DMA we don't check for info->x_char in transmit_chars_dma(),
  6582. + * but we do it in handle_ser_tx_interrupt().
6583. + * We disable the DMA channel, enable the tx ready interrupt, and write the
  6584. + * character when possible.
  6585. + */
  6586. +static void rs_send_xchar(struct tty_struct *tty, char ch)
  6587. +{
  6588. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6589. + unsigned long flags;
  6590. + local_irq_save(flags);
  6591. + if (info->uses_dma_out) {
  6592. + /* Put the DMA on hold and disable the channel */
  6593. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
  6594. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) !=
  6595. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, hold));
  6596. + e100_disable_txdma_channel(info);
  6597. + }
  6598. +
  6599. + /* Must make sure transmitter is not stopped before we can transmit */
  6600. + if (tty->stopped)
  6601. + rs_start(tty);
  6602. +
  6603. + /* Enable manual transmit interrupt and send from there */
  6604. + DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
  6605. + info->x_char = ch;
  6606. + e100_enable_serial_tx_ready_irq(info);
  6607. + local_irq_restore(flags);
  6608. +}
  6609. +
  6610. +/*
  6611. + * ------------------------------------------------------------
  6612. + * rs_throttle()
  6613. + *
  6614. + * This routine is called by the upper-layer tty layer to signal that
  6615. + * incoming characters should be throttled.
  6616. + * ------------------------------------------------------------
  6617. + */
  6618. +static void
  6619. +rs_throttle(struct tty_struct * tty)
  6620. +{
  6621. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6622. +#ifdef SERIAL_DEBUG_THROTTLE
  6623. + char buf[64];
  6624. +
  6625. + printk("throttle %s: %lu....\n", tty_name(tty, buf),
  6626. + (unsigned long)tty->ldisc.chars_in_buffer(tty));
  6627. +#endif
  6628. + DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
  6629. +
  6630. + /* Do RTS before XOFF since XOFF might take some time */
  6631. + if (tty->termios->c_cflag & CRTSCTS) {
  6632. + /* Turn off RTS line */
  6633. + e100_rts(info, 0);
  6634. + }
  6635. + if (I_IXOFF(tty))
  6636. + rs_send_xchar(tty, STOP_CHAR(tty));
  6637. +
  6638. +}
  6639. +
  6640. +static void
  6641. +rs_unthrottle(struct tty_struct * tty)
  6642. +{
  6643. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6644. +#ifdef SERIAL_DEBUG_THROTTLE
  6645. + char buf[64];
  6646. +
  6647. + printk("unthrottle %s: %lu....\n", tty_name(tty, buf),
  6648. + (unsigned long)tty->ldisc.chars_in_buffer(tty));
  6649. +#endif
  6650. + DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
  6651. + DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n", tty->flip.count));
  6652. + /* Do RTS before XOFF since XOFF might take some time */
  6653. + if (tty->termios->c_cflag & CRTSCTS) {
  6654. + /* Assert RTS line */
  6655. + e100_rts(info, 1);
  6656. + }
  6657. +
  6658. + if (I_IXOFF(tty)) {
  6659. + if (info->x_char)
  6660. + info->x_char = 0;
  6661. + else
  6662. + rs_send_xchar(tty, START_CHAR(tty));
  6663. + }
  6664. +
  6665. +}
  6666. +
  6667. +/*
  6668. + * ------------------------------------------------------------
  6669. + * rs_ioctl() and friends
  6670. + * ------------------------------------------------------------
  6671. + */
  6672. +
  6673. +static int
  6674. +get_serial_info(struct e100_serial * info,
  6675. + struct serial_struct * retinfo)
  6676. +{
  6677. + struct serial_struct tmp;
  6678. +
6679. + /* This is probably all wrong; there are a lot of fields
6680. + * here that we don't have in e100_serial, and maybe we
6681. + * should set them to something other than 0.
  6682. + */
  6683. +
  6684. + if (!retinfo)
  6685. + return -EFAULT;
  6686. + memset(&tmp, 0, sizeof(tmp));
  6687. + tmp.type = info->type;
  6688. + tmp.line = info->line;
  6689. + tmp.port = (int)info->ioport;
  6690. + tmp.irq = info->irq;
  6691. + tmp.flags = info->flags;
  6692. + tmp.baud_base = info->baud_base;
  6693. + tmp.close_delay = info->close_delay;
  6694. + tmp.closing_wait = info->closing_wait;
  6695. + tmp.custom_divisor = info->custom_divisor;
  6696. + if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
  6697. + return -EFAULT;
  6698. + return 0;
  6699. +}
  6700. +
  6701. +static int
  6702. +set_serial_info(struct e100_serial *info,
  6703. + struct serial_struct *new_info)
  6704. +{
  6705. + struct serial_struct new_serial;
  6706. + struct e100_serial old_info;
  6707. + int retval = 0;
  6708. +
  6709. + if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
  6710. + return -EFAULT;
  6711. +
  6712. + old_info = *info;
  6713. +
  6714. + if (!capable(CAP_SYS_ADMIN)) {
  6715. + if ((new_serial.type != info->type) ||
  6716. + (new_serial.close_delay != info->close_delay) ||
  6717. + ((new_serial.flags & ~ASYNC_USR_MASK) !=
  6718. + (info->flags & ~ASYNC_USR_MASK)))
  6719. + return -EPERM;
  6720. + info->flags = ((info->flags & ~ASYNC_USR_MASK) |
  6721. + (new_serial.flags & ASYNC_USR_MASK));
  6722. + goto check_and_exit;
  6723. + }
  6724. +
  6725. + if (info->count > 1)
  6726. + return -EBUSY;
  6727. +
  6728. + /*
  6729. + * OK, past this point, all the error checking has been done.
  6730. + * At this point, we start making changes.....
  6731. + */
  6732. +
  6733. + info->baud_base = new_serial.baud_base;
  6734. + info->flags = ((info->flags & ~ASYNC_FLAGS) |
  6735. + (new_serial.flags & ASYNC_FLAGS));
  6736. + info->custom_divisor = new_serial.custom_divisor;
  6737. + info->type = new_serial.type;
  6738. + info->close_delay = new_serial.close_delay;
  6739. + info->closing_wait = new_serial.closing_wait;
  6740. + info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
  6741. +
  6742. + check_and_exit:
  6743. + if (info->flags & ASYNC_INITIALIZED) {
  6744. + change_speed(info);
  6745. + } else
  6746. + retval = startup(info);
  6747. + return retval;
  6748. +}
  6749. +
  6750. +/*
  6751. + * get_lsr_info - get line status register info
  6752. + *
  6753. + * Purpose: Let user call ioctl() to get info when the UART physically
  6754. + * is emptied. On bus types like RS485, the transmitter must
  6755. + * release the bus after transmitting. This must be done when
6756. + * the transmit shift register is empty, not when the
  6757. + * transmit holding register is empty. This functionality
  6758. + * allows an RS485 driver to be written in user space.
  6759. + */
  6760. +static int
  6761. +get_lsr_info(struct e100_serial * info, unsigned int *value)
  6762. +{
  6763. + unsigned int result = TIOCSER_TEMT;
  6764. +#ifndef CONFIG_SVINTO_SIM
  6765. + unsigned long curr_time = jiffies;
  6766. + unsigned long curr_time_usec = GET_JIFFIES_USEC();
  6767. + unsigned long elapsed_usec =
  6768. + (curr_time - info->last_tx_active) * 1000000/HZ +
  6769. + curr_time_usec - info->last_tx_active_usec;
  6770. +
  6771. + if (info->xmit.head != info->xmit.tail ||
  6772. + elapsed_usec < 2*info->char_time_usec) {
  6773. + result = 0;
  6774. + }
  6775. +#endif
  6776. +
  6777. + if (copy_to_user(value, &result, sizeof(int)))
  6778. + return -EFAULT;
  6779. + return 0;
  6780. +}
  6781. +
  6782. +#ifdef SERIAL_DEBUG_IO
  6783. +struct state_str
  6784. +{
  6785. + int state;
  6786. + const char *str;
  6787. +};
  6788. +
  6789. +const struct state_str control_state_str[] = {
  6790. + {TIOCM_DTR, "DTR" },
  6791. + {TIOCM_RTS, "RTS"},
  6792. + {TIOCM_ST, "ST?" },
  6793. + {TIOCM_SR, "SR?" },
  6794. + {TIOCM_CTS, "CTS" },
  6795. + {TIOCM_CD, "CD" },
  6796. + {TIOCM_RI, "RI" },
  6797. + {TIOCM_DSR, "DSR" },
  6798. + {0, NULL }
  6799. +};
  6800. +
  6801. +char *get_control_state_str(int MLines, char *s)
  6802. +{
  6803. + int i = 0;
  6804. +
  6805. + s[0]='\0';
  6806. + while (control_state_str[i].str != NULL) {
  6807. + if (MLines & control_state_str[i].state) {
  6808. + if (s[0] != '\0') {
  6809. + strcat(s, ", ");
  6810. + }
  6811. + strcat(s, control_state_str[i].str);
  6812. + }
  6813. + i++;
  6814. + }
  6815. + return s;
  6816. +}
  6817. +#endif
  6818. +
  6819. +static int
  6820. +rs_break(struct tty_struct *tty, int break_state)
  6821. +{
  6822. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6823. + unsigned long flags;
  6824. +
  6825. + if (!info->ioport)
  6826. + return -EIO;
  6827. +
  6828. + local_irq_save(flags);
  6829. + if (break_state == -1) {
  6830. + /* Go to manual mode and set the txd pin to 0 */
  6831. + /* Clear bit 7 (txd) and 6 (tr_enable) */
  6832. + info->tx_ctrl &= 0x3F;
  6833. + } else {
  6834. + /* Set bit 7 (txd) and 6 (tr_enable) */
  6835. + info->tx_ctrl |= (0x80 | 0x40);
  6836. + }
  6837. + info->ioport[REG_TR_CTRL] = info->tx_ctrl;
  6838. + local_irq_restore(flags);
  6839. + return 0;
  6840. +}
  6841. +
  6842. +static int
  6843. +rs_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
  6844. +{
  6845. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6846. + unsigned long flags;
  6847. +
  6848. + local_irq_save(flags);
  6849. +
  6850. + if (clear & TIOCM_RTS)
  6851. + e100_rts(info, 0);
  6852. + if (clear & TIOCM_DTR)
  6853. + e100_dtr(info, 0);
  6854. + /* Handle FEMALE behaviour */
  6855. + if (clear & TIOCM_RI)
  6856. + e100_ri_out(info, 0);
  6857. + if (clear & TIOCM_CD)
  6858. + e100_cd_out(info, 0);
  6859. +
  6860. + if (set & TIOCM_RTS)
  6861. + e100_rts(info, 1);
  6862. + if (set & TIOCM_DTR)
  6863. + e100_dtr(info, 1);
  6864. + /* Handle FEMALE behaviour */
  6865. + if (set & TIOCM_RI)
  6866. + e100_ri_out(info, 1);
  6867. + if (set & TIOCM_CD)
  6868. + e100_cd_out(info, 1);
  6869. +
  6870. + local_irq_restore(flags);
  6871. + return 0;
  6872. +}
  6873. +
  6874. +static int
  6875. +rs_tiocmget(struct tty_struct *tty)
  6876. +{
  6877. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6878. + unsigned int result;
  6879. + unsigned long flags;
  6880. +
  6881. + local_irq_save(flags);
  6882. +
  6883. + result =
  6884. + (!E100_RTS_GET(info) ? TIOCM_RTS : 0)
  6885. + | (!E100_DTR_GET(info) ? TIOCM_DTR : 0)
  6886. + | (!E100_RI_GET(info) ? TIOCM_RNG : 0)
  6887. + | (!E100_DSR_GET(info) ? TIOCM_DSR : 0)
  6888. + | (!E100_CD_GET(info) ? TIOCM_CAR : 0)
  6889. + | (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
  6890. +
  6891. + local_irq_restore(flags);
  6892. +
  6893. +#ifdef SERIAL_DEBUG_IO
  6894. + printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
  6895. + info->line, result, result);
  6896. + {
  6897. + char s[100];
  6898. +
  6899. + get_control_state_str(result, s);
  6900. + printk(KERN_DEBUG "state: %s\n", s);
  6901. + }
  6902. +#endif
  6903. + return result;
  6904. +
  6905. +}
  6906. +
  6907. +
  6908. +static int
  6909. +rs_ioctl(struct tty_struct *tty,
  6910. + unsigned int cmd, unsigned long arg)
  6911. +{
  6912. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  6913. +
  6914. + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
  6915. + (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
  6916. + (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
  6917. + if (tty->flags & (1 << TTY_IO_ERROR))
  6918. + return -EIO;
  6919. + }
  6920. +
  6921. + switch (cmd) {
  6922. + case TIOCGSERIAL:
  6923. + return get_serial_info(info,
  6924. + (struct serial_struct *) arg);
  6925. + case TIOCSSERIAL:
  6926. + return set_serial_info(info,
  6927. + (struct serial_struct *) arg);
  6928. + case TIOCSERGETLSR: /* Get line status register */
  6929. + return get_lsr_info(info, (unsigned int *) arg);
  6930. +
  6931. + case TIOCSERGSTRUCT:
  6932. + if (copy_to_user((struct e100_serial *) arg,
  6933. + info, sizeof(struct e100_serial)))
  6934. + return -EFAULT;
  6935. + return 0;
  6936. +
  6937. +#if defined(CONFIG_ETRAX_RS485)
  6938. + case TIOCSERSETRS485:
  6939. + {
  6940. + /* In this ioctl we still use the old structure
  6941. + * rs485_control for backward compatibility
  6942. + * (if we use serial_rs485, then old user-level code
  6943. + * wouldn't work anymore...).
  6944. + * The use of this ioctl is deprecated: use TIOCSRS485
  6945. + * instead.*/
  6946. + struct rs485_control rs485ctrl;
  6947. + struct serial_rs485 rs485data;
  6948. + printk(KERN_DEBUG "The use of this ioctl is deprecated. Use TIOCSRS485 instead\n");
  6949. + if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
  6950. + sizeof(rs485ctrl)))
  6951. + return -EFAULT;
  6952. +
  6953. + rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
  6954. + rs485data.flags = 0;
  6955. + if (rs485data.delay_rts_before_send != 0)
  6956. + rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
  6957. + else
  6958. + rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
  6959. +
  6960. + if (rs485ctrl.enabled)
  6961. + rs485data.flags |= SER_RS485_ENABLED;
  6962. + else
  6963. + rs485data.flags &= ~(SER_RS485_ENABLED);
  6964. +
  6965. + if (rs485ctrl.rts_on_send)
  6966. + rs485data.flags |= SER_RS485_RTS_ON_SEND;
  6967. + else
  6968. + rs485data.flags &= ~(SER_RS485_RTS_ON_SEND);
  6969. +
  6970. + if (rs485ctrl.rts_after_sent)
  6971. + rs485data.flags |= SER_RS485_RTS_AFTER_SEND;
  6972. + else
  6973. + rs485data.flags &= ~(SER_RS485_RTS_AFTER_SEND);
  6974. +
  6975. + return e100_enable_rs485(tty, &rs485data);
  6976. + }
  6977. +
  6978. + case TIOCSRS485:
  6979. + {
  6980. + /* This is the new version of TIOCSRS485, with new
  6981. + * data structure serial_rs485 */
  6982. + struct serial_rs485 rs485data;
  6983. + if (copy_from_user(&rs485data, (struct rs485_control *)arg,
  6984. + sizeof(rs485data)))
  6985. + return -EFAULT;
  6986. +
  6987. + return e100_enable_rs485(tty, &rs485data);
  6988. + }
  6989. +
  6990. + case TIOCGRS485:
  6991. + {
  6992. + struct serial_rs485 *rs485data =
  6993. + &(((struct e100_serial *)tty->driver_data)->rs485);
6994. + /* This is the ioctl used to return the RS485 settings to user-space */
  6995. + if (copy_to_user((struct serial_rs485 *) arg,
  6996. + rs485data,
  6997. + sizeof(struct serial_rs485)))
  6998. + return -EFAULT;
  6999. + break;
  7000. + }
  7001. +
  7002. + case TIOCSERWRRS485:
  7003. + {
  7004. + struct rs485_write rs485wr;
  7005. + if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
  7006. + sizeof(rs485wr)))
  7007. + return -EFAULT;
  7008. +
  7009. + return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
  7010. + }
  7011. +#endif
  7012. +
  7013. + default:
  7014. + return -ENOIOCTLCMD;
  7015. + }
  7016. + return 0;
  7017. +}
  7018. +
  7019. +static void
  7020. +rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
  7021. +{
  7022. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  7023. +
  7024. + change_speed(info);
  7025. +
  7026. + /* Handle turning off CRTSCTS */
  7027. + if ((old_termios->c_cflag & CRTSCTS) &&
  7028. + !(tty->termios->c_cflag & CRTSCTS)) {
  7029. + tty->hw_stopped = 0;
  7030. + rs_start(tty);
  7031. + }
  7032. +
  7033. +}
  7034. +
  7035. +/*
  7036. + * ------------------------------------------------------------
  7037. + * rs_close()
  7038. + *
  7039. + * This routine is called when the serial port gets closed. First, we
  7040. + * wait for the last remaining data to be sent. Then, we unlink its
7041. + * async structure from the interrupt chain if necessary, and we free
  7042. + * that IRQ if nothing is left in the chain.
  7043. + * ------------------------------------------------------------
  7044. + */
  7045. +static void
  7046. +rs_close(struct tty_struct *tty, struct file * filp)
  7047. +{
  7048. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  7049. + unsigned long flags;
  7050. +
  7051. + if (!info)
  7052. + return;
  7053. +
  7054. + /* interrupts are disabled for this entire function */
  7055. +
  7056. + local_irq_save(flags);
  7057. +
  7058. + if (tty_hung_up_p(filp)) {
  7059. + local_irq_restore(flags);
  7060. + return;
  7061. + }
  7062. +
  7063. +#ifdef SERIAL_DEBUG_OPEN
  7064. + printk("[%d] rs_close ttyS%d, count = %d\n", current->pid,
  7065. + info->line, info->count);
  7066. +#endif
  7067. + if ((tty->count == 1) && (info->count != 1)) {
  7068. + /*
  7069. + * Uh, oh. tty->count is 1, which means that the tty
  7070. + * structure will be freed. Info->count should always
  7071. + * be one in these conditions. If it's greater than
  7072. + * one, we've got real problems, since it means the
7073. + * serial port won't be shut down.
  7074. + */
  7075. + printk(KERN_CRIT
  7076. + "rs_close: bad serial port count; tty->count is 1, "
  7077. + "info->count is %d\n", info->count);
  7078. + info->count = 1;
  7079. + }
  7080. + if (--info->count < 0) {
  7081. + printk(KERN_CRIT "rs_close: bad serial port count for ttyS%d: %d\n",
  7082. + info->line, info->count);
  7083. + info->count = 0;
  7084. + }
  7085. + if (info->count) {
  7086. + local_irq_restore(flags);
  7087. + return;
  7088. + }
  7089. + info->flags |= ASYNC_CLOSING;
  7090. + /*
  7091. + * Save the termios structure, since this port may have
  7092. + * separate termios for callout and dialin.
  7093. + */
  7094. + if (info->flags & ASYNC_NORMAL_ACTIVE)
  7095. + info->normal_termios = *tty->termios;
  7096. + /*
  7097. + * Now we wait for the transmit buffer to clear; and we notify
  7098. + * the line discipline to only process XON/XOFF characters.
  7099. + */
  7100. + tty->closing = 1;
  7101. + if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
  7102. + tty_wait_until_sent(tty, info->closing_wait);
  7103. + /*
  7104. + * At this point we stop accepting input. To do this, we
  7105. + * disable the serial receiver and the DMA receive interrupt.
  7106. + */
  7107. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  7108. + e100_disable_serial_data_irq(info);
  7109. +#endif
  7110. +
  7111. +#ifndef CONFIG_SVINTO_SIM
  7112. + e100_disable_rx(info);
  7113. + e100_disable_rx_irq(info);
  7114. +
  7115. + if (info->flags & ASYNC_INITIALIZED) {
  7116. + /*
  7117. + * Before we drop DTR, make sure the UART transmitter
  7118. + * has completely drained; this is especially
  7119. + * important as we have a transmit FIFO!
  7120. + */
  7121. + rs_wait_until_sent(tty, HZ);
  7122. + }
  7123. +#endif
  7124. +
  7125. + shutdown(info);
  7126. + rs_flush_buffer(tty);
  7127. + tty_ldisc_flush(tty);
  7128. + tty->closing = 0;
  7129. + info->event = 0;
  7130. + info->port.tty = NULL;
  7131. + if (info->blocked_open) {
  7132. + if (info->close_delay)
  7133. + schedule_timeout_interruptible(info->close_delay);
  7134. + wake_up_interruptible(&info->open_wait);
  7135. + }
  7136. + info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
  7137. + wake_up_interruptible(&info->close_wait);
  7138. + local_irq_restore(flags);
  7139. +
  7140. + /* port closed */
  7141. +
  7142. +#if defined(CONFIG_ETRAX_RS485)
  7143. + if (info->rs485.flags & SER_RS485_ENABLED) {
  7144. + info->rs485.flags &= ~(SER_RS485_ENABLED);
  7145. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  7146. + *R_PORT_PA_DATA = port_pa_data_shadow &= ~(1 << rs485_pa_bit);
  7147. +#endif
  7148. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  7149. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  7150. + rs485_port_g_bit, 0);
  7151. +#endif
  7152. +#if defined(CONFIG_ETRAX_RS485_LTC1387)
  7153. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  7154. + CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 0);
  7155. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  7156. + CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 0);
  7157. +#endif
  7158. + }
  7159. +#endif
  7160. +
  7161. + /*
7163. + * Release any allocated DMA IRQs.
  7163. + */
  7164. + if (info->dma_in_enabled) {
  7165. + free_irq(info->dma_in_irq_nbr, info);
  7166. + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
  7167. + info->uses_dma_in = 0;
  7168. +#ifdef SERIAL_DEBUG_OPEN
  7169. + printk(KERN_DEBUG "DMA irq '%s' freed\n",
  7170. + info->dma_in_irq_description);
  7171. +#endif
  7172. + }
  7173. + if (info->dma_out_enabled) {
  7174. + free_irq(info->dma_out_irq_nbr, info);
  7175. + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
  7176. + info->uses_dma_out = 0;
  7177. +#ifdef SERIAL_DEBUG_OPEN
  7178. + printk(KERN_DEBUG "DMA irq '%s' freed\n",
  7179. + info->dma_out_irq_description);
  7180. +#endif
  7181. + }
  7182. +}
  7183. +
  7184. +/*
  7185. + * rs_wait_until_sent() --- wait until the transmitter is empty
  7186. + */
  7187. +static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
  7188. +{
  7189. + unsigned long orig_jiffies;
  7190. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  7191. + unsigned long curr_time = jiffies;
  7192. + unsigned long curr_time_usec = GET_JIFFIES_USEC();
  7193. + long elapsed_usec =
  7194. + (curr_time - info->last_tx_active) * (1000000/HZ) +
  7195. + curr_time_usec - info->last_tx_active_usec;
  7196. +
  7197. + /*
  7198. + * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
  7199. + * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
  7200. + */
  7201. + orig_jiffies = jiffies;
  7202. + while (info->xmit.head != info->xmit.tail || /* More in send queue */
  7203. + (*info->ostatusadr & 0x007f) || /* more in FIFO */
  7204. + (elapsed_usec < 2*info->char_time_usec)) {
  7205. + schedule_timeout_interruptible(1);
  7206. + if (signal_pending(current))
  7207. + break;
  7208. + if (timeout && time_after(jiffies, orig_jiffies + timeout))
  7209. + break;
  7210. + curr_time = jiffies;
  7211. + curr_time_usec = GET_JIFFIES_USEC();
  7212. + elapsed_usec =
  7213. + (curr_time - info->last_tx_active) * (1000000/HZ) +
  7214. + curr_time_usec - info->last_tx_active_usec;
  7215. + }
  7216. + set_current_state(TASK_RUNNING);
  7217. +}
  7218. +
  7219. +/*
  7220. + * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
  7221. + */
  7222. +void
  7223. +rs_hangup(struct tty_struct *tty)
  7224. +{
  7225. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  7226. +
  7227. + rs_flush_buffer(tty);
  7228. + shutdown(info);
  7229. + info->event = 0;
  7230. + info->count = 0;
  7231. + info->flags &= ~ASYNC_NORMAL_ACTIVE;
  7232. + info->port.tty = NULL;
  7233. + wake_up_interruptible(&info->open_wait);
  7234. +}
  7235. +
  7236. +/*
  7237. + * ------------------------------------------------------------
  7238. + * rs_open() and friends
  7239. + * ------------------------------------------------------------
  7240. + */
  7241. +static int
  7242. +block_til_ready(struct tty_struct *tty, struct file * filp,
  7243. + struct e100_serial *info)
  7244. +{
  7245. + DECLARE_WAITQUEUE(wait, current);
  7246. + unsigned long flags;
  7247. + int retval;
  7248. + int do_clocal = 0, extra_count = 0;
  7249. +
  7250. + /*
  7251. + * If the device is in the middle of being closed, then block
  7252. + * until it's done, and then try again.
  7253. + */
  7254. + if (tty_hung_up_p(filp) ||
  7255. + (info->flags & ASYNC_CLOSING)) {
  7256. + wait_event_interruptible_tty(info->close_wait,
  7257. + !(info->flags & ASYNC_CLOSING));
  7258. +#ifdef SERIAL_DO_RESTART
  7259. + if (info->flags & ASYNC_HUP_NOTIFY)
  7260. + return -EAGAIN;
  7261. + else
  7262. + return -ERESTARTSYS;
  7263. +#else
  7264. + return -EAGAIN;
  7265. +#endif
  7266. + }
  7267. +
  7268. + /*
  7269. + * If non-blocking mode is set, or the port is not enabled,
  7270. + * then make the check up front and then exit.
  7271. + */
  7272. + if ((filp->f_flags & O_NONBLOCK) ||
  7273. + (tty->flags & (1 << TTY_IO_ERROR))) {
  7274. + info->flags |= ASYNC_NORMAL_ACTIVE;
  7275. + return 0;
  7276. + }
  7277. +
  7278. + if (tty->termios->c_cflag & CLOCAL) {
  7279. + do_clocal = 1;
  7280. + }
  7281. +
  7282. + /*
  7283. + * Block waiting for the carrier detect and the line to become
  7284. + * free (i.e., not in use by the callout). While we are in
  7285. + * this loop, info->count is dropped by one, so that
  7286. + * rs_close() knows when to free things. We restore it upon
  7287. + * exit, either normal or abnormal.
  7288. + */
  7289. + retval = 0;
  7290. + add_wait_queue(&info->open_wait, &wait);
  7291. +#ifdef SERIAL_DEBUG_OPEN
  7292. + printk("block_til_ready before block: ttyS%d, count = %d\n",
  7293. + info->line, info->count);
  7294. +#endif
  7295. + local_irq_save(flags);
  7296. + if (!tty_hung_up_p(filp)) {
  7297. + extra_count++;
  7298. + info->count--;
  7299. + }
  7300. + local_irq_restore(flags);
  7301. + info->blocked_open++;
  7302. + while (1) {
  7303. + local_irq_save(flags);
  7304. + /* assert RTS and DTR */
  7305. + e100_rts(info, 1);
  7306. + e100_dtr(info, 1);
  7307. + local_irq_restore(flags);
  7308. + set_current_state(TASK_INTERRUPTIBLE);
  7309. + if (tty_hung_up_p(filp) ||
  7310. + !(info->flags & ASYNC_INITIALIZED)) {
  7311. +#ifdef SERIAL_DO_RESTART
  7312. + if (info->flags & ASYNC_HUP_NOTIFY)
  7313. + retval = -EAGAIN;
  7314. + else
  7315. + retval = -ERESTARTSYS;
  7316. +#else
  7317. + retval = -EAGAIN;
  7318. +#endif
  7319. + break;
  7320. + }
  7321. + if (!(info->flags & ASYNC_CLOSING) && do_clocal)
  7322. + /* && (do_clocal || DCD_IS_ASSERTED) */
  7323. + break;
  7324. + if (signal_pending(current)) {
  7325. + retval = -ERESTARTSYS;
  7326. + break;
  7327. + }
  7328. +#ifdef SERIAL_DEBUG_OPEN
  7329. + printk("block_til_ready blocking: ttyS%d, count = %d\n",
  7330. + info->line, info->count);
  7331. +#endif
  7332. + tty_unlock();
  7333. + schedule();
  7334. + tty_lock();
  7335. + }
  7336. + set_current_state(TASK_RUNNING);
  7337. + remove_wait_queue(&info->open_wait, &wait);
  7338. + if (extra_count)
  7339. + info->count++;
  7340. + info->blocked_open--;
  7341. +#ifdef SERIAL_DEBUG_OPEN
  7342. + printk("block_til_ready after blocking: ttyS%d, count = %d\n",
  7343. + info->line, info->count);
  7344. +#endif
  7345. + if (retval)
  7346. + return retval;
  7347. + info->flags |= ASYNC_NORMAL_ACTIVE;
  7348. + return 0;
  7349. +}
  7350. +
  7351. +static void
  7352. +deinit_port(struct e100_serial *info)
  7353. +{
  7354. + if (info->dma_out_enabled) {
  7355. + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
  7356. + free_irq(info->dma_out_irq_nbr, info);
  7357. + }
  7358. + if (info->dma_in_enabled) {
  7359. + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
  7360. + free_irq(info->dma_in_irq_nbr, info);
  7361. + }
  7362. +}
  7363. +
  7364. +/*
  7365. + * This routine is called whenever a serial port is opened.
  7366. + * It performs the serial-specific initialization for the tty structure.
  7367. + */
  7368. +static int
  7369. +rs_open(struct tty_struct *tty, struct file * filp)
  7370. +{
  7371. + struct e100_serial *info;
  7372. + int retval, line;
  7373. + unsigned long page;
  7374. + int allocated_resources = 0;
  7375. +
  7376. + /* find which port we want to open */
  7377. + line = tty->index;
  7378. +
  7379. + if (line < 0 || line >= NR_PORTS)
  7380. + return -ENODEV;
  7381. +
  7382. + /* find the corresponding e100_serial struct in the table */
  7383. + info = rs_table + line;
  7384. +
  7385. + /* don't allow the opening of ports that are not enabled in the HW config */
  7386. + if (!info->enabled)
  7387. + return -ENODEV;
  7388. +
  7389. +#ifdef SERIAL_DEBUG_OPEN
  7390. + printk("[%d] rs_open %s, count = %d\n", current->pid, tty->name,
  7391. + info->count);
  7392. +#endif
  7393. +
  7394. + info->count++;
  7395. + tty->driver_data = info;
  7396. + info->port.tty = tty;
  7397. +
  7398. + info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
  7399. +
  7400. + if (!tmp_buf) {
  7401. + page = get_zeroed_page(GFP_KERNEL);
  7402. + if (!page) {
  7403. + return -ENOMEM;
  7404. + }
  7405. + if (tmp_buf)
  7406. + free_page(page);
  7407. + else
  7408. + tmp_buf = (unsigned char *) page;
  7409. + }
  7410. +
  7411. + /*
  7412. + * If the port is in the middle of closing, bail out now
  7413. + */
  7414. + if (tty_hung_up_p(filp) ||
  7415. + (info->flags & ASYNC_CLOSING)) {
  7416. + wait_event_interruptible_tty(info->close_wait,
  7417. + !(info->flags & ASYNC_CLOSING));
  7418. +#ifdef SERIAL_DO_RESTART
  7419. + return ((info->flags & ASYNC_HUP_NOTIFY) ?
  7420. + -EAGAIN : -ERESTARTSYS);
  7421. +#else
  7422. + return -EAGAIN;
  7423. +#endif
  7424. + }
  7425. +
  7426. + /*
7427. + * If DMA is enabled, try to allocate the IRQs.
  7428. + */
  7429. + if (info->count == 1) {
  7430. + allocated_resources = 1;
  7431. + if (info->dma_in_enabled) {
  7432. + if (request_irq(info->dma_in_irq_nbr,
  7433. + rec_interrupt,
  7434. + info->dma_in_irq_flags,
  7435. + info->dma_in_irq_description,
  7436. + info)) {
  7437. + printk(KERN_WARNING "DMA irq '%s' busy; "
  7438. + "falling back to non-DMA mode\n",
  7439. + info->dma_in_irq_description);
  7440. + /* Make sure we never try to use DMA in */
  7441. + /* for the port again. */
  7442. + info->dma_in_enabled = 0;
  7443. + } else if (cris_request_dma(info->dma_in_nbr,
  7444. + info->dma_in_irq_description,
  7445. + DMA_VERBOSE_ON_ERROR,
  7446. + info->dma_owner)) {
  7447. + free_irq(info->dma_in_irq_nbr, info);
  7448. + printk(KERN_WARNING "DMA '%s' busy; "
  7449. + "falling back to non-DMA mode\n",
  7450. + info->dma_in_irq_description);
  7451. + /* Make sure we never try to use DMA in */
  7452. + /* for the port again. */
  7453. + info->dma_in_enabled = 0;
  7454. + }
  7455. +#ifdef SERIAL_DEBUG_OPEN
  7456. + else
  7457. + printk(KERN_DEBUG "DMA irq '%s' allocated\n",
  7458. + info->dma_in_irq_description);
  7459. +#endif
  7460. + }
  7461. + if (info->dma_out_enabled) {
  7462. + if (request_irq(info->dma_out_irq_nbr,
  7463. + tr_interrupt,
  7464. + info->dma_out_irq_flags,
  7465. + info->dma_out_irq_description,
  7466. + info)) {
  7467. + printk(KERN_WARNING "DMA irq '%s' busy; "
  7468. + "falling back to non-DMA mode\n",
  7469. + info->dma_out_irq_description);
  7470. + /* Make sure we never try to use DMA out */
  7471. + /* for the port again. */
  7472. + info->dma_out_enabled = 0;
  7473. + } else if (cris_request_dma(info->dma_out_nbr,
  7474. + info->dma_out_irq_description,
  7475. + DMA_VERBOSE_ON_ERROR,
  7476. + info->dma_owner)) {
  7477. + free_irq(info->dma_out_irq_nbr, info);
  7478. + printk(KERN_WARNING "DMA '%s' busy; "
  7479. + "falling back to non-DMA mode\n",
  7480. + info->dma_out_irq_description);
  7481. + /* Make sure we never try to use DMA out */
  7482. + /* for the port again. */
  7483. + info->dma_out_enabled = 0;
  7484. + }
  7485. +#ifdef SERIAL_DEBUG_OPEN
  7486. + else
  7487. + printk(KERN_DEBUG "DMA irq '%s' allocated\n",
  7488. + info->dma_out_irq_description);
  7489. +#endif
  7490. + }
  7491. + }
  7492. +
  7493. + /*
  7494. + * Start up the serial port
  7495. + */
  7496. +
  7497. + retval = startup(info);
  7498. + if (retval) {
  7499. + if (allocated_resources)
  7500. + deinit_port(info);
  7501. +
7502. + /* FIXME: Decrease info->count here too? */
  7503. + return retval;
  7504. + }
  7505. +
  7506. +
  7507. + retval = block_til_ready(tty, filp, info);
  7508. + if (retval) {
  7509. +#ifdef SERIAL_DEBUG_OPEN
  7510. + printk("rs_open returning after block_til_ready with %d\n",
  7511. + retval);
  7512. +#endif
  7513. + if (allocated_resources)
  7514. + deinit_port(info);
  7515. +
  7516. + return retval;
  7517. + }
  7518. +
  7519. + if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
  7520. + *tty->termios = info->normal_termios;
  7521. + change_speed(info);
  7522. + }
  7523. +
  7524. +#ifdef SERIAL_DEBUG_OPEN
  7525. + printk("rs_open ttyS%d successful...\n", info->line);
  7526. +#endif
  7527. + DLOG_INT_TRIG( log_int_pos = 0);
  7528. +
  7529. + DFLIP( if (info->line == SERIAL_DEBUG_LINE) {
  7530. + info->icount.rx = 0;
  7531. + } );
  7532. +
  7533. + return 0;
  7534. +}
  7535. +
  7536. +#ifdef CONFIG_PROC_FS
  7537. +/*
  7538. + * /proc fs routines....
  7539. + */
  7540. +
  7541. +static void seq_line_info(struct seq_file *m, struct e100_serial *info)
  7542. +{
  7543. + unsigned long tmp;
  7544. +
  7545. + seq_printf(m, "%d: uart:E100 port:%lX irq:%d",
  7546. + info->line, (unsigned long)info->ioport, info->irq);
  7547. +
  7548. + if (!info->ioport || (info->type == PORT_UNKNOWN)) {
  7549. + seq_printf(m, "\n");
  7550. + return;
  7551. + }
  7552. +
  7553. + seq_printf(m, " baud:%d", info->baud);
  7554. + seq_printf(m, " tx:%lu rx:%lu",
  7555. + (unsigned long)info->icount.tx,
  7556. + (unsigned long)info->icount.rx);
  7557. + tmp = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  7558. + if (tmp)
  7559. + seq_printf(m, " tx_pend:%lu/%lu",
  7560. + (unsigned long)tmp,
  7561. + (unsigned long)SERIAL_XMIT_SIZE);
  7562. +
  7563. + seq_printf(m, " rx_pend:%lu/%lu",
  7564. + (unsigned long)info->recv_cnt,
  7565. + (unsigned long)info->max_recv_cnt);
  7566. +
  7567. +#if 1
  7568. + if (info->port.tty) {
  7569. + if (info->port.tty->stopped)
  7570. + seq_printf(m, " stopped:%i",
  7571. + (int)info->port.tty->stopped);
  7572. + if (info->port.tty->hw_stopped)
  7573. + seq_printf(m, " hw_stopped:%i",
  7574. + (int)info->port.tty->hw_stopped);
  7575. + }
  7576. +
  7577. + {
  7578. + unsigned char rstat = info->ioport[REG_STATUS];
  7579. + if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect))
  7580. + seq_printf(m, " xoff_detect:1");
  7581. + }
  7582. +
  7583. +#endif
  7584. +
  7585. + if (info->icount.frame)
  7586. + seq_printf(m, " fe:%lu", (unsigned long)info->icount.frame);
  7587. +
  7588. + if (info->icount.parity)
  7589. + seq_printf(m, " pe:%lu", (unsigned long)info->icount.parity);
  7590. +
  7591. + if (info->icount.brk)
  7592. + seq_printf(m, " brk:%lu", (unsigned long)info->icount.brk);
  7593. +
  7594. + if (info->icount.overrun)
  7595. + seq_printf(m, " oe:%lu", (unsigned long)info->icount.overrun);
  7596. +
  7597. + /*
  7598. + * Last thing is the RS-232 status lines
  7599. + */
  7600. + if (!E100_RTS_GET(info))
  7601. + seq_puts(m, "|RTS");
  7602. + if (!E100_CTS_GET(info))
  7603. + seq_puts(m, "|CTS");
  7604. + if (!E100_DTR_GET(info))
  7605. + seq_puts(m, "|DTR");
  7606. + if (!E100_DSR_GET(info))
  7607. + seq_puts(m, "|DSR");
  7608. + if (!E100_CD_GET(info))
  7609. + seq_puts(m, "|CD");
  7610. + if (!E100_RI_GET(info))
  7611. + seq_puts(m, "|RI");
  7612. + seq_puts(m, "\n");
  7613. +}
  7614. +
  7615. +
  7616. +static int crisv10_proc_show(struct seq_file *m, void *v)
  7617. +{
  7618. + int i;
  7619. +
  7620. + seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
  7621. +
  7622. + for (i = 0; i < NR_PORTS; i++) {
  7623. + if (!rs_table[i].enabled)
  7624. + continue;
  7625. + seq_line_info(m, &rs_table[i]);
  7626. + }
  7627. +#ifdef DEBUG_LOG_INCLUDED
  7628. + for (i = 0; i < debug_log_pos; i++) {
  7629. + seq_printf(m, "%-4i %lu.%lu ",
  7630. + i, debug_log[i].time,
  7631. + timer_data_to_ns(debug_log[i].timer_data));
  7632. + seq_printf(m, debug_log[i].string, debug_log[i].value);
  7633. + }
  7634. + seq_printf(m, "debug_log %i/%i\n", i, DEBUG_LOG_SIZE);
  7635. + debug_log_pos = 0;
  7636. +#endif
  7637. + return 0;
  7638. +}
  7639. +
  7640. +static int crisv10_proc_open(struct inode *inode, struct file *file)
  7641. +{
  7642. + return single_open(file, crisv10_proc_show, NULL);
  7643. +}
  7644. +
  7645. +static const struct file_operations crisv10_proc_fops = {
  7646. + .owner = THIS_MODULE,
  7647. + .open = crisv10_proc_open,
  7648. + .read = seq_read,
  7649. + .llseek = seq_lseek,
  7650. + .release = single_release,
  7651. +};
  7652. +#endif
  7653. +
  7654. +
  7655. +/* Finally, routines used to initialize the serial driver. */
  7656. +
  7657. +static void show_serial_version(void)
  7658. +{
  7659. + printk(KERN_INFO
  7660. + "ETRAX 100LX serial-driver %s, "
  7661. + "(c) 2000-2004 Axis Communications AB\r\n",
  7662. + &serial_version[11]); /* "$Revision: x.yy" */
  7663. +}
  7664. +
  7665. +/* rs_init inits the driver at boot (using the module_init chain) */
  7666. +
  7667. +static const struct tty_operations rs_ops = {
  7668. + .open = rs_open,
  7669. + .close = rs_close,
  7670. + .write = rs_write,
  7671. + .flush_chars = rs_flush_chars,
  7672. + .write_room = rs_write_room,
  7673. + .chars_in_buffer = rs_chars_in_buffer,
  7674. + .flush_buffer = rs_flush_buffer,
  7675. + .ioctl = rs_ioctl,
  7676. + .throttle = rs_throttle,
  7677. + .unthrottle = rs_unthrottle,
  7678. + .set_termios = rs_set_termios,
  7679. + .stop = rs_stop,
  7680. + .start = rs_start,
  7681. + .hangup = rs_hangup,
  7682. + .break_ctl = rs_break,
  7683. + .send_xchar = rs_send_xchar,
  7684. + .wait_until_sent = rs_wait_until_sent,
  7685. + .tiocmget = rs_tiocmget,
  7686. + .tiocmset = rs_tiocmset,
  7687. +#ifdef CONFIG_PROC_FS
  7688. + .proc_fops = &crisv10_proc_fops,
  7689. +#endif
  7690. +};
  7691. +
  7692. +static int __init rs_init(void)
  7693. +{
  7694. + int i;
  7695. + struct e100_serial *info;
  7696. + struct tty_driver *driver = alloc_tty_driver(NR_PORTS);
  7697. +
  7698. + if (!driver)
  7699. + return -ENOMEM;
  7700. +
  7701. + show_serial_version();
  7702. +
  7703. + /* Setup the timed flush handler system */
  7704. +
  7705. +#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
  7706. + setup_timer(&flush_timer, timed_flush_handler, 0);
  7707. + mod_timer(&flush_timer, jiffies + 5);
  7708. +#endif
  7709. +
  7710. +#if defined(CONFIG_ETRAX_RS485)
  7711. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  7712. + if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
  7713. + rs485_pa_bit)) {
  7714. + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
  7715. + "RS485 pin\n");
  7716. + put_tty_driver(driver);
  7717. + return -EBUSY;
  7718. + }
  7719. +#endif
  7720. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  7721. + if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
  7722. + rs485_port_g_bit)) {
  7723. + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
  7724. + "RS485 pin\n");
  7725. + put_tty_driver(driver);
  7726. + return -EBUSY;
  7727. + }
  7728. +#endif
  7729. +#endif
  7730. +
  7731. + /* Initialize the tty_driver structure */
  7732. +
  7733. + driver->driver_name = "serial";
  7734. + driver->name = "ttyS";
  7735. + driver->major = TTY_MAJOR;
  7736. + driver->minor_start = 64;
  7737. + driver->type = TTY_DRIVER_TYPE_SERIAL;
  7738. + driver->subtype = SERIAL_TYPE_NORMAL;
  7739. + driver->init_termios = tty_std_termios;
  7740. + driver->init_termios.c_cflag =
  7741. + B115200 | CS8 | CREAD | HUPCL | CLOCAL; /* is normally B9600 default... */
  7742. + driver->init_termios.c_ispeed = 115200;
  7743. + driver->init_termios.c_ospeed = 115200;
  7744. + driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
  7745. +
  7746. + tty_set_operations(driver, &rs_ops);
  7747. + serial_driver = driver;
  7748. + if (tty_register_driver(driver))
  7749. + panic("Couldn't register serial driver\n");
  7750. + /* do some initializing for the separate ports */
  7751. +
  7752. + for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
  7753. + if (info->enabled) {
  7754. + if (cris_request_io_interface(info->io_if,
  7755. + info->io_if_description)) {
  7756. + printk(KERN_CRIT "ETRAX100LX async serial: "
  7757. + "Could not allocate IO pins for "
  7758. + "%s, port %d\n",
  7759. + info->io_if_description, i);
  7760. + info->enabled = 0;
  7761. + }
  7762. + }
  7763. + info->uses_dma_in = 0;
  7764. + info->uses_dma_out = 0;
  7765. + info->line = i;
  7766. + info->port.tty = NULL;
  7767. + info->type = PORT_ETRAX;
  7768. + info->tr_running = 0;
  7769. + info->forced_eop = 0;
  7770. + info->baud_base = DEF_BAUD_BASE;
  7771. + info->custom_divisor = 0;
  7772. + info->flags = 0;
  7773. + info->close_delay = 5*HZ/10;
  7774. + info->closing_wait = 30*HZ;
  7775. + info->x_char = 0;
  7776. + info->event = 0;
  7777. + info->count = 0;
  7778. + info->blocked_open = 0;
  7779. + info->normal_termios = driver->init_termios;
  7780. + init_waitqueue_head(&info->open_wait);
  7781. + init_waitqueue_head(&info->close_wait);
  7782. + info->xmit.buf = NULL;
  7783. + info->xmit.tail = info->xmit.head = 0;
  7784. + info->first_recv_buffer = info->last_recv_buffer = NULL;
  7785. + info->recv_cnt = info->max_recv_cnt = 0;
  7786. + info->last_tx_active_usec = 0;
  7787. + info->last_tx_active = 0;
  7788. +
  7789. +#if defined(CONFIG_ETRAX_RS485)
  7790. + /* Set sane defaults */
  7791. + info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
  7792. + info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
  7793. + info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
  7794. + info->rs485.delay_rts_before_send = 0;
  7795. + info->rs485.flags &= ~(SER_RS485_ENABLED);
  7796. +#endif
  7797. + INIT_WORK(&info->work, do_softint);
  7798. +
  7799. + if (info->enabled) {
  7800. + printk(KERN_INFO "%s%d at %p is a builtin UART with DMA\n",
  7801. + serial_driver->name, info->line, info->ioport);
  7802. + }
  7803. + }
  7804. +#ifdef CONFIG_ETRAX_FAST_TIMER
  7805. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  7806. + memset(fast_timers, 0, sizeof(fast_timers));
  7807. +#endif
  7808. +#ifdef CONFIG_ETRAX_RS485
  7809. + memset(fast_timers_rs485, 0, sizeof(fast_timers_rs485));
  7810. +#endif
  7811. + fast_timer_init();
  7812. +#endif
  7813. +
  7814. +#ifndef CONFIG_SVINTO_SIM
  7815. +#ifndef CONFIG_ETRAX_KGDB
  7816. + /* Not needed in simulator. May only complicate stuff. */
  7817. + /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
  7818. +
  7819. + if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
  7820. + IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
  7821. + panic("%s: Failed to request irq8", __func__);
  7822. +
  7823. +#endif
  7824. +#endif /* CONFIG_SVINTO_SIM */
  7825. +
  7826. + return 0;
  7827. +}
  7828. +
  7829. +/* this makes sure that rs_init is called during kernel boot */
  7830. +
  7831. +module_init(rs_init);
  7832. diff -Nur linux-2.6.39.orig/drivers/usb/host/hc-cris-dbg.h linux-2.6.39/drivers/usb/host/hc-cris-dbg.h
  7833. --- linux-2.6.39.orig/drivers/usb/host/hc-cris-dbg.h 1970-01-01 01:00:00.000000000 +0100
  7834. +++ linux-2.6.39/drivers/usb/host/hc-cris-dbg.h 2011-07-28 16:16:37.503543830 +0200
  7835. @@ -0,0 +1,146 @@
  7836. +
  7837. +/* macros for debug output */
  7838. +
  7839. +#define warn(fmt, args...) \
  7840. + printk(KERN_INFO "crisv10 warn: ");printk(fmt, ## args)
  7841. +
  7842. +#define hcd_dbg(hcd, fmt, args...) \
  7843. + dev_info(hcd->self.controller, fmt, ## args)
  7844. +#define hcd_err(hcd, fmt, args...) \
  7845. + dev_err(hcd->self.controller, fmt, ## args)
  7846. +#define hcd_info(hcd, fmt, args...) \
  7847. + dev_info(hcd->self.controller, fmt, ## args)
  7848. +#define hcd_warn(hcd, fmt, args...) \
  7849. + dev_warn(hcd->self.controller, fmt, ## args)
  7850. +
  7851. +/*
  7852. +#define devdrv_dbg(fmt, args...) \
  7853. + printk(KERN_INFO "usb_devdrv dbg: ");printk(fmt, ## args)
  7854. +*/
  7855. +#define devdrv_dbg(fmt, args...) {}
  7856. +
  7857. +#define devdrv_err(fmt, args...) \
  7858. + printk(KERN_ERR "usb_devdrv error: ");printk(fmt, ## args)
  7859. +#define devdrv_info(fmt, args...) \
  7860. + printk(KERN_INFO "usb_devdrv: ");printk(fmt, ## args)
  7861. +
  7862. +#define irq_dbg(fmt, args...) \
  7863. + printk(KERN_INFO "crisv10_irq dbg: ");printk(fmt, ## args)
  7864. +#define irq_err(fmt, args...) \
  7865. + printk(KERN_ERR "crisv10_irq error: ");printk(fmt, ## args)
  7866. +#define irq_warn(fmt, args...) \
  7867. + printk(KERN_INFO "crisv10_irq warn: ");printk(fmt, ## args)
  7868. +#define irq_info(fmt, args...) \
  7869. + printk(KERN_INFO "crisv10_hcd: ");printk(fmt, ## args)
  7870. +
  7871. +/*
  7872. +#define rh_dbg(fmt, args...) \
  7873. + printk(KERN_DEBUG "crisv10_rh dbg: ");printk(fmt, ## args)
  7874. +*/
  7875. +#define rh_dbg(fmt, args...) {}
  7876. +
  7877. +#define rh_err(fmt, args...) \
  7878. + printk(KERN_ERR "crisv10_rh error: ");printk(fmt, ## args)
  7879. +#define rh_warn(fmt, args...) \
  7880. + printk(KERN_INFO "crisv10_rh warning: ");printk(fmt, ## args)
  7881. +#define rh_info(fmt, args...) \
  7882. + printk(KERN_INFO "crisv10_rh: ");printk(fmt, ## args)
  7883. +
  7884. +/*
  7885. +#define tc_dbg(fmt, args...) \
  7886. + printk(KERN_INFO "crisv10_tc dbg: ");printk(fmt, ## args)
  7887. +*/
  7888. +#define tc_dbg(fmt, args...) {while(0){}}
  7889. +
  7890. +#define tc_err(fmt, args...) \
  7891. + printk(KERN_ERR "crisv10_tc error: ");printk(fmt, ## args)
  7892. +/*
  7893. +#define tc_warn(fmt, args...) \
  7894. + printk(KERN_INFO "crisv10_tc warning: ");printk(fmt, ## args)
  7895. +*/
  7896. +#define tc_warn(fmt, args...) {while(0){}}
  7897. +
  7898. +#define tc_info(fmt, args...) \
  7899. + printk(KERN_INFO "crisv10_tc: ");printk(fmt, ## args)
  7900. +
  7901. +
  7902. +/* Debug print-outs for various traffic types */
  7903. +
  7904. +#define intr_warn(fmt, args...) \
  7905. + printk(KERN_INFO "crisv10_intr warning: ");printk(fmt, ## args)
  7906. +
  7907. +#define intr_dbg(fmt, args...) \
  7908. + printk(KERN_DEBUG "crisv10_intr dbg: ");printk(fmt, ## args)
  7909. +/*
  7910. +#define intr_dbg(fmt, args...) {while(0){}}
  7911. +*/
  7912. +
  7913. +
  7914. +#define isoc_err(fmt, args...) \
  7915. + printk(KERN_ERR "crisv10_isoc error: ");printk(fmt, ## args)
  7916. +/*
  7917. +#define isoc_warn(fmt, args...) \
  7918. + printk(KERN_INFO "crisv10_isoc warning: ");printk(fmt, ## args)
  7919. +*/
  7920. +#define isoc_warn(fmt, args...) {while(0){}}
  7921. +
  7922. +/*
  7923. +#define isoc_dbg(fmt, args...) \
  7924. + printk(KERN_INFO "crisv10_isoc dbg: ");printk(fmt, ## args)
  7925. +*/
  7926. +#define isoc_dbg(fmt, args...) {while(0){}}
  7927. +
  7928. +/*
  7929. +#define timer_warn(fmt, args...) \
  7930. + printk(KERN_INFO "crisv10_timer warning: ");printk(fmt, ## args)
  7931. +*/
  7932. +#define timer_warn(fmt, args...) {while(0){}}
  7933. +
  7934. +/*
  7935. +#define timer_dbg(fmt, args...) \
  7936. + printk(KERN_INFO "crisv10_timer dbg: ");printk(fmt, ## args)
  7937. +*/
  7938. +#define timer_dbg(fmt, args...) {while(0){}}
  7939. +
  7940. +
  7941. +/* Debug printouts for events related to late finishing of URBs */
  7942. +
  7943. +#define late_dbg(fmt, args...) \
  7944. + printk(KERN_INFO "crisv10_late dbg: ");printk(fmt, ## args)
  7945. +/*
  7946. +#define late_dbg(fmt, args...) {while(0){}}
  7947. +*/
  7948. +
  7949. +#define late_warn(fmt, args...) \
  7950. + printk(KERN_INFO "crisv10_late warning: ");printk(fmt, ## args)
  7951. +/*
  7952. +#define errno_dbg(fmt, args...) \
  7953. + printk(KERN_INFO "crisv10_errno dbg: ");printk(fmt, ## args)
  7954. +*/
  7955. +#define errno_dbg(fmt, args...) {while(0){}}
  7956. +
  7957. +
  7958. +#define dma_dbg(fmt, args...) \
  7959. + printk(KERN_INFO "crisv10_dma dbg: ");printk(fmt, ## args)
  7960. +#define dma_err(fmt, args...) \
  7961. + printk(KERN_ERR "crisv10_dma error: ");printk(fmt, ## args)
  7962. +#define dma_warn(fmt, args...) \
  7963. + printk(KERN_INFO "crisv10_dma warning: ");printk(fmt, ## args)
  7964. +#define dma_info(fmt, args...) \
  7965. + printk(KERN_INFO "crisv10_dma: ");printk(fmt, ## args)
  7966. +
  7967. +
  7968. +
  7969. +#define str_dir(pipe) \
  7970. + (usb_pipeout(pipe) ? "out" : "in")
  7971. +#define str_type(pipe) \
  7972. + ({ \
  7973. + char *s = "?"; \
  7974. + switch (usb_pipetype(pipe)) { \
  7975. + case PIPE_ISOCHRONOUS: s = "iso"; break; \
  7976. + case PIPE_INTERRUPT: s = "intr"; break; \
  7977. + case PIPE_CONTROL: s = "ctrl"; break; \
  7978. + case PIPE_BULK: s = "bulk"; break; \
  7979. + }; \
  7980. + s; \
  7981. + })
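Note that the printk-based macros in this header expand to two statements (a prefix printk followed by the message printk), so an unbraced "if (cond) warn(...);" only guards the first printk, and the disabled variants expand to {} or {while(0){}} rather than the conventional do {} while (0). A safer form, sketched here on the assumption that fmt is always a string literal (as it is at the call sites visible in this driver), would be:

#define warn(fmt, args...) \
        do { printk(KERN_INFO "crisv10 warn: " fmt, ## args); } while (0)

This keeps the macro usable as a single statement in any context, including unbraced if/else arms.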
  7982. diff -Nur linux-2.6.39.orig/drivers/usb/host/hc-crisv10.c linux-2.6.39/drivers/usb/host/hc-crisv10.c
  7983. --- linux-2.6.39.orig/drivers/usb/host/hc-crisv10.c 1970-01-01 01:00:00.000000000 +0100
  7984. +++ linux-2.6.39/drivers/usb/host/hc-crisv10.c 2011-07-28 16:16:37.633441816 +0200
  7985. @@ -0,0 +1,4801 @@
  7986. +/*
  7987. + *
  7988. + * ETRAX 100LX USB Host Controller Driver
  7989. + *
  7990. + * Copyright (C) 2005, 2006 Axis Communications AB
  7991. + *
  7992. + * Author: Konrad Eriksson <konrad.eriksson@axis.se>
  7993. + *
  7994. + */
  7995. +
  7996. +#include <linux/module.h>
  7997. +#include <linux/kernel.h>
  7998. +#include <linux/init.h>
  7999. +#include <linux/moduleparam.h>
  8000. +#include <linux/slab.h>
  8001. +#include <linux/spinlock.h>
  8002. +#include <linux/usb.h>
  8003. +#include <linux/platform_device.h>
  8004. +#include <linux/usb/hcd.h>
  8005. +
  8006. +#include <asm/io.h>
  8007. +#include <asm/irq.h>
  8008. +#include <arch/dma.h>
  8009. +#include <arch/io_interface_mux.h>
  8010. +
  8011. +#include "hc-crisv10.h"
  8012. +#include "hc-cris-dbg.h"
  8013. +
  8014. +
  8015. +/***************************************************************************/
  8016. +/***************************************************************************/
  8017. +/* Host Controller settings */
  8018. +/***************************************************************************/
  8019. +/***************************************************************************/
  8020. +
  8021. +#define VERSION "1.00 hinko.4"
  8022. +#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
  8023. +#define DESCRIPTION "ETRAX 100LX USB Host Controller"
  8024. +
  8025. +#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
  8026. +#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
  8027. +#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
  8028. +
  8029. +/* Number of physical ports in Etrax 100LX */
  8030. +#define USB_ROOT_HUB_PORTS 2
  8031. +
  8032. +const char hc_name[] = "hc-crisv10";
  8033. +const char product_desc[] = DESCRIPTION;
  8034. +
  8035. +/* The number of epids is, among other things, used for pre-allocating
  8036. + ctrl, bulk and isoc EP descriptors (one for each epid).
  8037. + Assumed to be > 1 when initiating the DMA lists. */
  8038. +#define NBR_OF_EPIDS 32
  8039. +
  8040. +/* Support interrupt traffic intervals up to 128 ms. */
  8041. +#define MAX_INTR_INTERVAL 128
  8042. +
  8043. +/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
  8044. + table must be "invalid". By this we mean that we shouldn't care about epid
  8045. + attentions for this epid, or at least handle them differently from epid
  8046. + attentions for "valid" epids. This define determines which one to use
  8047. + (don't change it). */
  8048. +#define INVALID_EPID 31
8049. +/* A special epid for the bulk dummies. */
  8050. +#define DUMMY_EPID 30
  8051. +
  8052. +/* Module settings */
  8053. +
  8054. +MODULE_DESCRIPTION(DESCRIPTION);
  8055. +MODULE_LICENSE("GPL");
  8056. +MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
  8057. +
  8058. +
  8059. +/* Module parameters */
  8060. +
  8061. +/* 0 = No ports enabled
  8062. + 1 = Only port 1 enabled (on board ethernet on devboard)
  8063. + 2 = Only port 2 enabled (external connector on devboard)
  8064. + 3 = Both ports enabled
  8065. +*/
  8066. +static unsigned int ports = 3;
  8067. +module_param(ports, uint, S_IRUGO);
  8068. +MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
  8069. +
  8070. +
  8071. +/***************************************************************************/
  8072. +/***************************************************************************/
  8073. +/* Shared global variables for this module */
  8074. +/***************************************************************************/
  8075. +/***************************************************************************/
  8076. +
8077. +/* EP descriptor lists for non-periodic transfers. Must be 32-bit aligned. */
  8078. +static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  8079. +
  8080. +static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  8081. +
8082. +/* EP descriptor lists for periodic transfers. Must be 32-bit aligned. */
  8083. +static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
  8084. +static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
  8085. +
  8086. +static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  8087. +static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
  8088. +
  8089. +//static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  8090. +
8091. +/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
8092. + causing the DMA channel to stop. The first of the two has the intr flag set, which
8093. + gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
8094. + EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
8095. + in each frame. */
  8096. +static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
  8097. +
8098. +/* List of URB pointers, where each entry points to the active URB for an epid.
8099. + For Bulk, Ctrl and Intr this is the URB that is currently added to the
8100. + DMA lists (Isoc URBs are all added directly to the DMA lists). As soon as
8101. + a URB has completed, the queue is examined and the first URB in the queue is
8102. + removed and moved to the activeUrbList, its state changes to STARTED and
8103. + its transfer(s) get added to the DMA list (except for Isoc, where URBs enter
8104. + state STARTED directly and their transfers are added to the DMA lists). */
  8105. +static struct urb *activeUrbList[NBR_OF_EPIDS];
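A conceptual sketch (not part of the patch, with sketch_* names invented for illustration) of the scheme described in the comment above: at most one active URB per epid, plus a per-epid FIFO of waiting URBs that is consulted whenever the active URB completes.

#define SKETCH_EPIDS 32

struct sketch_urb { struct sketch_urb *next; };

static struct sketch_urb *sketch_active[SKETCH_EPIDS];  /* like activeUrbList[] */
static struct sketch_urb *sketch_queue[SKETCH_EPIDS];   /* waiting URBs, FIFO head */

static void sketch_urb_done(int epid)
{
        struct sketch_urb *next = sketch_queue[epid];

        sketch_active[epid] = NULL;          /* the finished URB leaves the DMA lists */
        if (next) {
                sketch_queue[epid] = next->next;
                sketch_active[epid] = next;  /* promoted; state becomes STARTED */
                /* ...at this point the real driver links the URB's transfers
                   into the corresponding Tx EP/SB descriptor lists... */
        }
}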
  8106. +
  8107. +/* Additional software state info for each epid */
  8108. +static struct etrax_epid epid_state[NBR_OF_EPIDS];
  8109. +
8110. +/* Timers for bulk traffic, used to work around a DMA bug where the DMA stops
8111. + even though there is new data waiting to be processed */
  8112. +static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
  8113. +static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
  8114. +
  8115. +/* We want the start timer to expire before the eot timer, because the former
  8116. + might start traffic, thus making it unnecessary for the latter to time
  8117. + out. */
  8118. +#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
  8119. +#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
  8120. +
8121. +/* Delay before a URB completion happens when it's scheduled to be delayed */
  8122. +#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
  8123. +
8124. +/* Simplifying macros for checking software state info of an epid */
  8125. +/* ----------------------------------------------------------------------- */
  8126. +#define epid_inuse(epid) epid_state[epid].inuse
  8127. +#define epid_out_traffic(epid) epid_state[epid].out_traffic
  8128. +#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
  8129. +#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
  8130. +
  8131. +
  8132. +/***************************************************************************/
  8133. +/***************************************************************************/
  8134. +/* DEBUG FUNCTIONS */
  8135. +/***************************************************************************/
  8136. +/***************************************************************************/
8137. +/* Note that these functions are always available in their "__" variants,
8138. + for use in error situations. The variants without "__" are controlled by
8139. + the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
  8140. +static void __dump_urb(struct urb* purb)
  8141. +{
  8142. + struct crisv10_urb_priv *urb_priv = purb->hcpriv;
  8143. + int urb_num = -1;
  8144. + if(urb_priv) {
  8145. + urb_num = urb_priv->urb_num;
  8146. + }
  8147. + printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
  8148. + printk("dev :0x%08lx\n", (unsigned long)purb->dev);
  8149. + printk("pipe :0x%08x\n", purb->pipe);
  8150. + printk("status :%d\n", purb->status);
  8151. + printk("transfer_flags :0x%08x\n", purb->transfer_flags);
  8152. + printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
  8153. + printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
  8154. + printk("actual_length :%d\n", purb->actual_length);
  8155. + printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
  8156. + printk("start_frame :%d\n", purb->start_frame);
  8157. + printk("number_of_packets :%d\n", purb->number_of_packets);
  8158. + printk("interval :%d\n", purb->interval);
  8159. + printk("error_count :%d\n", purb->error_count);
  8160. + printk("context :0x%08lx\n", (unsigned long)purb->context);
  8161. + printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
  8162. +}
  8163. +
  8164. +static void __dump_in_desc(volatile struct USB_IN_Desc *in)
  8165. +{
  8166. + printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
  8167. + printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
  8168. + printk(" command : 0x%04x\n", in->command);
  8169. + printk(" next : 0x%08lx\n", in->next);
  8170. + printk(" buf : 0x%08lx\n", in->buf);
  8171. + printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
  8172. + printk(" status : 0x%04x\n\n", in->status);
  8173. +}
  8174. +
  8175. +static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
  8176. +{
  8177. + char tt = (sb->command & 0x30) >> 4;
  8178. + char *tt_string;
  8179. +
  8180. + switch (tt) {
  8181. + case 0:
  8182. + tt_string = "zout";
  8183. + break;
  8184. + case 1:
  8185. + tt_string = "in";
  8186. + break;
  8187. + case 2:
  8188. + tt_string = "out";
  8189. + break;
  8190. + case 3:
  8191. + tt_string = "setup";
  8192. + break;
  8193. + default:
  8194. + tt_string = "unknown (weird)";
  8195. + }
  8196. +
  8197. + printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
  8198. + printk(" command:0x%04x (", sb->command);
  8199. + printk("rem:%d ", (sb->command & 0x3f00) >> 8);
  8200. + printk("full:%d ", (sb->command & 0x40) >> 6);
  8201. + printk("tt:%d(%s) ", tt, tt_string);
  8202. + printk("intr:%d ", (sb->command & 0x8) >> 3);
  8203. + printk("eot:%d ", (sb->command & 0x2) >> 1);
  8204. + printk("eol:%d)", sb->command & 0x1);
  8205. + printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
  8206. + printk(" next:0x%08lx", sb->next);
  8207. + printk(" buf:0x%08lx\n", sb->buf);
  8208. +}
  8209. +
  8210. +
  8211. +static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
  8212. +{
  8213. + printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
  8214. + printk(" command:0x%04x (", ep->command);
  8215. + printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
  8216. + printk("enable:%d ", (ep->command & 0x10) >> 4);
  8217. + printk("intr:%d ", (ep->command & 0x8) >> 3);
  8218. + printk("eof:%d ", (ep->command & 0x2) >> 1);
  8219. + printk("eol:%d)", ep->command & 0x1);
  8220. + printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
  8221. + printk(" next:0x%08lx", ep->next);
  8222. + printk(" sub:0x%08lx\n", ep->sub);
  8223. +}
  8224. +
  8225. +static inline void __dump_ep_list(int pipe_type)
  8226. +{
  8227. + volatile struct USB_EP_Desc *ep;
  8228. + volatile struct USB_EP_Desc *first_ep;
  8229. + volatile struct USB_SB_Desc *sb;
  8230. +
  8231. + switch (pipe_type)
  8232. + {
  8233. + case PIPE_BULK:
  8234. + first_ep = &TxBulkEPList[0];
  8235. + break;
  8236. + case PIPE_CONTROL:
  8237. + first_ep = &TxCtrlEPList[0];
  8238. + break;
  8239. + case PIPE_INTERRUPT:
  8240. + first_ep = &TxIntrEPList[0];
  8241. + break;
  8242. + case PIPE_ISOCHRONOUS:
  8243. + first_ep = &TxIsocEPList[0];
  8244. + break;
  8245. + default:
  8246. + warn("Cannot dump unknown traffic type");
  8247. + return;
  8248. + }
  8249. + ep = first_ep;
  8250. +
  8251. + printk("\n\nDumping EP list...\n\n");
  8252. +
  8253. + do {
  8254. + __dump_ep_desc(ep);
  8255. + /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
  8256. + sb = ep->sub ? phys_to_virt(ep->sub) : 0;
  8257. + while (sb) {
  8258. + __dump_sb_desc(sb);
  8259. + sb = sb->next ? phys_to_virt(sb->next) : 0;
  8260. + }
  8261. + ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
  8262. +
  8263. + } while (ep != first_ep);
  8264. +}
  8265. +
  8266. +static inline void __dump_ept_data(int epid)
  8267. +{
  8268. + unsigned long flags;
  8269. + __u32 r_usb_ept_data;
  8270. +
  8271. + if (epid < 0 || epid > 31) {
  8272. + printk("Cannot dump ept data for invalid epid %d\n", epid);
  8273. + return;
  8274. + }
  8275. +
  8276. + local_irq_save(flags);
  8277. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  8278. + nop();
  8279. + r_usb_ept_data = *R_USB_EPT_DATA;
  8280. + local_irq_restore(flags);
  8281. +
  8282. + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
  8283. + if (r_usb_ept_data == 0) {
  8284. + /* No need for more detailed printing. */
  8285. + return;
  8286. + }
  8287. + printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
  8288. + printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
  8289. + printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
  8290. + printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
  8291. + printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
  8292. + printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
  8293. + printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
  8294. + printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
  8295. + printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
  8296. + printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
  8297. + printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
  8298. + printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
  8299. +}
  8300. +
  8301. +static inline void __dump_ept_data_iso(int epid)
  8302. +{
  8303. + unsigned long flags;
  8304. + __u32 ept_data;
  8305. +
  8306. + if (epid < 0 || epid > 31) {
  8307. + printk("Cannot dump ept data for invalid epid %d\n", epid);
  8308. + return;
  8309. + }
  8310. +
  8311. + local_irq_save(flags);
  8312. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  8313. + nop();
  8314. + ept_data = *R_USB_EPT_DATA_ISO;
  8315. + local_irq_restore(flags);
  8316. +
  8317. + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
  8318. + if (ept_data == 0) {
  8319. + /* No need for more detailed printing. */
  8320. + return;
  8321. + }
  8322. + printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
  8323. + ept_data));
  8324. + printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
  8325. + ept_data));
  8326. + printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
  8327. + ept_data));
  8328. + printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
  8329. + ept_data));
  8330. + printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
  8331. + ept_data));
  8332. + printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
  8333. + ept_data));
  8334. +}
  8335. +
  8336. +static inline void __dump_ept_data_list(void)
  8337. +{
  8338. + int i;
  8339. +
  8340. + printk("Dumping the whole R_USB_EPT_DATA list\n");
  8341. +
  8342. + for (i = 0; i < 32; i++) {
  8343. + __dump_ept_data(i);
  8344. + }
  8345. +}
  8346. +
  8347. +static void debug_epid(int epid) {
  8348. + int i;
  8349. +
  8350. + if(epid_isoc(epid)) {
  8351. + __dump_ept_data_iso(epid);
  8352. + } else {
  8353. + __dump_ept_data(epid);
  8354. + }
  8355. +
  8356. + printk("Bulk:\n");
  8357. + for(i = 0; i < 32; i++) {
  8358. + if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
  8359. + epid) {
  8360. + printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
  8361. + }
  8362. + }
  8363. +
  8364. + printk("Ctrl:\n");
  8365. + for(i = 0; i < 32; i++) {
  8366. + if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
  8367. + epid) {
  8368. + printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
  8369. + }
  8370. + }
  8371. +
  8372. + printk("Intr:\n");
  8373. + for(i = 0; i < MAX_INTR_INTERVAL; i++) {
  8374. + if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
  8375. + epid) {
  8376. + printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
  8377. + }
  8378. + }
  8379. +
  8380. + printk("Isoc:\n");
  8381. + for(i = 0; i < 32; i++) {
  8382. + if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
  8383. + epid) {
  8384. + printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
  8385. + }
  8386. + }
  8387. +
  8388. + __dump_ept_data_list();
  8389. + __dump_ep_list(PIPE_INTERRUPT);
  8390. + printk("\n\n");
  8391. +}
  8392. +
  8393. +
  8394. +
  8395. +char* hcd_status_to_str(__u8 bUsbStatus) {
  8396. + static char hcd_status_str[128];
  8397. + hcd_status_str[0] = '\0';
  8398. + if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
  8399. + strcat(hcd_status_str, "ourun ");
  8400. + }
  8401. + if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
  8402. + strcat(hcd_status_str, "perror ");
  8403. + }
  8404. + if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
  8405. + strcat(hcd_status_str, "device_mode ");
  8406. + }
  8407. + if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
  8408. + strcat(hcd_status_str, "host_mode ");
  8409. + }
  8410. + if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
  8411. + strcat(hcd_status_str, "started ");
  8412. + }
  8413. + if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
  8414. + strcat(hcd_status_str, "running ");
  8415. + }
  8416. + return hcd_status_str;
  8417. +}
  8418. +
  8419. +
  8420. +char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
  8421. + static char sblist_to_str_buff[128];
  8422. + char tmp[32], tmp2[32];
  8423. + sblist_to_str_buff[0] = '\0';
  8424. + while(sb_desc != NULL) {
  8425. + switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
  8426. + case 0: sprintf(tmp, "zout"); break;
  8427. + case 1: sprintf(tmp, "in"); break;
  8428. + case 2: sprintf(tmp, "out"); break;
  8429. + case 3: sprintf(tmp, "setup"); break;
  8430. + }
  8431. + sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
  8432. + strcat(sblist_to_str_buff, tmp2);
  8433. + if(sb_desc->next != 0) {
  8434. + sb_desc = phys_to_virt(sb_desc->next);
  8435. + } else {
  8436. + sb_desc = NULL;
  8437. + }
  8438. + }
  8439. + return sblist_to_str_buff;
  8440. +}
  8441. +
  8442. +char* port_status_to_str(__u16 wPortStatus) {
  8443. + static char port_status_str[128];
  8444. + port_status_str[0] = '\0';
  8445. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
  8446. + strcat(port_status_str, "connected ");
  8447. + }
  8448. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
  8449. + strcat(port_status_str, "enabled ");
  8450. + }
  8451. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
  8452. + strcat(port_status_str, "suspended ");
  8453. + }
  8454. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
  8455. + strcat(port_status_str, "reset ");
  8456. + }
  8457. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
  8458. + strcat(port_status_str, "full-speed ");
  8459. + } else {
  8460. + strcat(port_status_str, "low-speed ");
  8461. + }
  8462. + return port_status_str;
  8463. +}
  8464. +
  8465. +
  8466. +char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
  8467. + static char endpoint_to_str_buff[128];
  8468. + char tmp[32];
  8469. + int epnum = ed->bEndpointAddress & 0x0F;
  8470. + int dir = ed->bEndpointAddress & 0x80;
  8471. + int type = ed->bmAttributes & 0x03;
  8472. + endpoint_to_str_buff[0] = '\0';
  8473. + sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
  8474. + switch(type) {
  8475. + case 0:
  8476. + sprintf(tmp, " ctrl");
  8477. + break;
  8478. + case 1:
  8479. + sprintf(tmp, " isoc");
  8480. + break;
  8481. + case 2:
  8482. + sprintf(tmp, " bulk");
  8483. + break;
  8484. + case 3:
  8485. + sprintf(tmp, " intr");
  8486. + break;
  8487. + }
  8488. + strcat(endpoint_to_str_buff, tmp);
  8489. + if(dir) {
  8490. + sprintf(tmp, " in");
  8491. + } else {
  8492. + sprintf(tmp, " out");
  8493. + }
  8494. + strcat(endpoint_to_str_buff, tmp);
  8495. +
  8496. + return endpoint_to_str_buff;
  8497. +}
  8498. +
  8499. +/* Debug helper functions for Transfer Controller */
  8500. +char* pipe_to_str(unsigned int pipe) {
  8501. + static char pipe_to_str_buff[128];
  8502. + char tmp[64];
  8503. + sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
  8504. + sprintf(tmp, " type:%s", str_type(pipe));
  8505. + strcat(pipe_to_str_buff, tmp);
  8506. +
  8507. + sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
  8508. + strcat(pipe_to_str_buff, tmp);
  8509. + sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
  8510. + strcat(pipe_to_str_buff, tmp);
  8511. + return pipe_to_str_buff;
  8512. +}
  8513. +
  8514. +
  8515. +#define USB_DEBUG_DESC 1
  8516. +
  8517. +#ifdef USB_DEBUG_DESC
  8518. +#define dump_in_desc(x) __dump_in_desc(x)
8519. +#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
  8520. +#define dump_ep_desc(x) __dump_ep_desc(x)
  8521. +#define dump_ept_data(x) __dump_ept_data(x)
  8522. +#else
  8523. +#define dump_in_desc(...) do {} while (0)
  8524. +#define dump_sb_desc(...) do {} while (0)
8525. +#define dump_ep_desc(...) do {} while (0)
+#define dump_ept_data(...) do {} while (0)
8526. +#endif
  8527. +
  8528. +
  8529. +/* Uncomment this to enable massive function call trace
  8530. + #define USB_DEBUG_TRACE */
  8531. +//#define USB_DEBUG_TRACE 1
  8532. +
  8533. +#ifdef USB_DEBUG_TRACE
  8534. +#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
  8535. +#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
  8536. +#else
  8537. +#define DBFENTER do {} while (0)
  8538. +#define DBFEXIT do {} while (0)
  8539. +#endif
  8540. +
  8541. +#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
  8542. +{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
  8543. +
  8544. +/* Most helpful debugging aid */
  8545. +#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
  8546. +
  8547. +
  8548. +/***************************************************************************/
  8549. +/***************************************************************************/
  8550. +/* Forward declarations */
  8551. +/***************************************************************************/
  8552. +/***************************************************************************/
  8553. +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
  8554. +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
  8555. +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
  8556. +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
  8557. +
  8558. +void rh_port_status_change(__u16[]);
  8559. +int rh_clear_port_feature(__u8, __u16);
  8560. +int rh_set_port_feature(__u8, __u16);
  8561. +static void rh_disable_port(unsigned int port);
  8562. +
  8563. +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
  8564. + int timer);
  8565. +
  8566. +//static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
  8567. +// int mem_flags);
  8568. +static int tc_setup_epid(struct urb *urb, int mem_flags);
  8569. +static void tc_free_epid(struct usb_host_endpoint *ep);
  8570. +static int tc_allocate_epid(void);
  8571. +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
  8572. +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
  8573. + int status);
  8574. +
  8575. +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
  8576. + int mem_flags);
  8577. +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
  8578. +
  8579. +static inline struct urb *urb_list_first(int epid);
  8580. +static inline void urb_list_add(struct urb *urb, int epid,
  8581. + int mem_flags);
  8582. +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
  8583. +static inline void urb_list_del(struct urb *urb, int epid);
  8584. +static inline void urb_list_move_last(struct urb *urb, int epid);
  8585. +static inline struct urb *urb_list_next(struct urb *urb, int epid);
  8586. +
  8587. +int create_sb_for_urb(struct urb *urb, int mem_flags);
  8588. +int init_intr_urb(struct urb *urb, int mem_flags);
  8589. +
  8590. +static inline void etrax_epid_set(__u8 index, __u32 data);
  8591. +static inline void etrax_epid_clear_error(__u8 index);
  8592. +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
  8593. + __u8 toggle);
  8594. +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
  8595. +static inline __u32 etrax_epid_get(__u8 index);
  8596. +
8597. +/* R_USB_EPT_DATA and R_USB_EPT_DATA_ISO share the same register position in Etrax,
8598. + so when we do a full-register access the internal difference doesn't matter */
  8599. +#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
  8600. +#define etrax_epid_iso_get(index) etrax_epid_get(index)
  8601. +
  8602. +
  8603. +//static void tc_dma_process_isoc_urb(struct urb *urb);
  8604. +static void tc_dma_process_queue(int epid);
  8605. +static void tc_dma_unlink_intr_urb(struct urb *urb);
  8606. +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
  8607. +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
  8608. +
  8609. +static void tc_bulk_start_timer_func(unsigned long dummy);
  8610. +static void tc_bulk_eot_timer_func(unsigned long dummy);
  8611. +
  8612. +
  8613. +/*************************************************************/
  8614. +/*************************************************************/
8615. +/* Host Controller Driver block */
  8616. +/*************************************************************/
  8617. +/*************************************************************/
  8618. +
  8619. +/* HCD operations */
  8620. +static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
  8621. +static int crisv10_hcd_reset(struct usb_hcd *);
  8622. +static int crisv10_hcd_start(struct usb_hcd *);
  8623. +static void crisv10_hcd_stop(struct usb_hcd *);
  8624. +#ifdef CONFIG_PM
  8625. +static int crisv10_hcd_suspend(struct device *, u32, u32);
  8626. +static int crisv10_hcd_resume(struct device *, u32);
  8627. +#endif /* CONFIG_PM */
  8628. +static int crisv10_hcd_get_frame(struct usb_hcd *);
  8629. +
  8630. +//static int tc_urb_enqueue(struct usb_hcd *, struct usb_host_endpoint *ep, struct urb *, gfp_t mem_flags);
  8631. +static int tc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
  8632. +//static int tc_urb_dequeue(struct usb_hcd *, struct urb *);
  8633. +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
  8634. +static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
  8635. +
  8636. +static int rh_status_data_request(struct usb_hcd *, char *);
  8637. +static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
  8638. +
  8639. +#ifdef CONFIG_PM
  8640. +static int crisv10_hcd_hub_suspend(struct usb_hcd *);
  8641. +static int crisv10_hcd_hub_resume(struct usb_hcd *);
  8642. +#endif /* CONFIG_PM */
  8643. +#ifdef CONFIG_USB_OTG
  8644. +static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
  8645. +#endif /* CONFIG_USB_OTG */
  8646. +
  8647. +/* host controller driver interface */
  8648. +static const struct hc_driver crisv10_hc_driver =
  8649. + {
  8650. + .description = hc_name,
  8651. + .product_desc = product_desc,
  8652. + .hcd_priv_size = sizeof(struct crisv10_hcd),
  8653. +
8654. + /* Attaching IRQ handler manually in probe() */
  8655. + /* .irq = crisv10_hcd_irq, */
  8656. +
  8657. + .flags = HCD_USB11,
  8658. +
  8659. + /* called to init HCD and root hub */
  8660. + .reset = crisv10_hcd_reset,
  8661. + .start = crisv10_hcd_start,
  8662. +
  8663. + /* cleanly make HCD stop writing memory and doing I/O */
  8664. + .stop = crisv10_hcd_stop,
  8665. +
  8666. + /* return current frame number */
  8667. + .get_frame_number = crisv10_hcd_get_frame,
  8668. +
  8669. +
  8670. + /* Manage i/o requests via the Transfer Controller */
  8671. + .urb_enqueue = tc_urb_enqueue,
  8672. + .urb_dequeue = tc_urb_dequeue,
  8673. +
  8674. + /* hw synch, freeing endpoint resources that urb_dequeue can't */
  8675. + .endpoint_disable = tc_endpoint_disable,
  8676. +
  8677. +
  8678. + /* Root Hub support */
  8679. + .hub_status_data = rh_status_data_request,
  8680. + .hub_control = rh_control_request,
  8681. +#ifdef CONFIG_PM
  8682. + .hub_suspend = rh_suspend_request,
  8683. + .hub_resume = rh_resume_request,
  8684. +#endif /* CONFIG_PM */
  8685. +#ifdef CONFIG_USB_OTG
  8686. + .start_port_reset = crisv10_hcd_start_port_reset,
  8687. +#endif /* CONFIG_USB_OTG */
  8688. + };
  8689. +
  8690. +
  8691. +/*
  8692. + * conversion between pointers to a hcd and the corresponding
  8693. + * crisv10_hcd
  8694. + */
  8695. +
  8696. +static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
  8697. +{
  8698. + return (struct crisv10_hcd *) hcd->hcd_priv;
  8699. +}
  8700. +
  8701. +static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
  8702. +{
  8703. + return container_of((void *) hcd, struct usb_hcd, hcd_priv);
  8704. +}
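These two helpers are inverses: the crisv10_hcd state lives in the hcd_priv area embedded in struct usb_hcd (sized by .hcd_priv_size above), so going from usb_hcd to crisv10_hcd is a member access and going back is a container_of(). The same pattern with generic structs, as a standalone sketch (outer/inner are illustration-only names):

#include <stddef.h>

struct inner { int x; };
struct outer { int id; struct inner priv; };

#define outer_of(p) \
        ((struct outer *)((char *)(p) - offsetof(struct outer, priv)))

/* Given: struct outer o; struct inner *ip = &o.priv;
   then:  outer_of(ip) == &o, mirroring crisv10_hcd_to_hcd(). */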
  8705. +
  8706. +/* check if specified port is in use */
  8707. +static inline int port_in_use(unsigned int port)
  8708. +{
  8709. + return ports & (1 << port);
  8710. +}
  8711. +
  8712. +/* number of ports in use */
  8713. +static inline unsigned int num_ports(void)
  8714. +{
  8715. + unsigned int i, num = 0;
  8716. + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
  8717. + if (port_in_use(i))
  8718. + num++;
  8719. + return num;
  8720. +}
  8721. +
  8722. +/* map hub port number to the port number used internally by the HC */
  8723. +static inline unsigned int map_port(unsigned int port)
  8724. +{
  8725. + unsigned int i, num = 0;
  8726. + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
  8727. + if (port_in_use(i))
  8728. + if (++num == port)
  8729. + return i;
  8730. + return -1;
  8731. +}
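A standalone rendering (not part of the patch) of the ports bitmask helpers above, for the two-port case. With the module parameter ports=2, only physical port 2 is enabled, so port_in_use(0) is 0, port_in_use(1) is non-zero, num_ports() is 1, and hub port 1 maps to physical port index 1.

#include <stdio.h>

#define ROOT_HUB_PORTS 2
static unsigned int ports = 2;                 /* value of the module parameter */

static int port_in_use(unsigned int port)      { return ports & (1 << port); }

static unsigned int num_ports(void)
{
        unsigned int i, num = 0;
        for (i = 0; i < ROOT_HUB_PORTS; i++)
                if (port_in_use(i))
                        num++;
        return num;
}

static int map_port(unsigned int port)         /* hub port 1..N -> physical index */
{
        unsigned int i, num = 0;
        for (i = 0; i < ROOT_HUB_PORTS; i++)
                if (port_in_use(i))
                        if (++num == port)
                                return i;
        return -1;
}

int main(void)
{
        printf("num_ports=%u map_port(1)=%d\n", num_ports(), map_port(1));
        return 0;   /* prints: num_ports=1 map_port(1)=1 */
}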
  8732. +
  8733. +/* size of descriptors in slab cache */
  8734. +#ifndef MAX
  8735. +#define MAX(x, y) ((x) > (y) ? (x) : (y))
  8736. +#endif
  8737. +
  8738. +
  8739. +/******************************************************************/
  8740. +/* Hardware Interrupt functions */
  8741. +/******************************************************************/
  8742. +
  8743. +/* Fast interrupt handler for HC */
  8744. +static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
  8745. +{
  8746. + struct usb_hcd *hcd = vcd;
  8747. + struct crisv10_irq_reg reg;
  8748. + __u32 irq_mask;
  8749. + unsigned long flags;
  8750. +
  8751. + DBFENTER;
  8752. +
  8753. + ASSERT(hcd != NULL);
  8754. + reg.hcd = hcd;
  8755. +
8756. + /* Turn off other interrupts while handling these sensitive cases */
  8757. + local_irq_save(flags);
  8758. +
8759. + /* Read out which interrupts are flagged */
  8760. + irq_mask = *R_USB_IRQ_MASK_READ;
  8761. + reg.r_usb_irq_mask_read = irq_mask;
  8762. +
  8763. + /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
  8764. + R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
  8765. + clears the ourun and perror fields of R_USB_STATUS. */
  8766. + reg.r_usb_status = *R_USB_STATUS;
  8767. +
  8768. + /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
  8769. + interrupts. */
  8770. + reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
  8771. +
  8772. + /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
  8773. + port_status interrupt. */
  8774. + reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
  8775. + reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
  8776. +
  8777. + /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
  8778. + /* Note: the lower 11 bits contain the actual frame number, sent with each
  8779. + sof. */
  8780. + reg.r_usb_fm_number = *R_USB_FM_NUMBER;
  8781. +
  8782. + /* Interrupts are handled in order of priority. */
  8783. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
  8784. + crisv10_hcd_port_status_irq(&reg);
  8785. + }
  8786. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
  8787. + crisv10_hcd_epid_attn_irq(&reg);
  8788. + }
  8789. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
  8790. + crisv10_hcd_ctl_status_irq(&reg);
  8791. + }
  8792. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
  8793. + crisv10_hcd_isoc_eof_irq(&reg);
  8794. + }
  8795. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
  8796. + /* Update/restart the bulk start timer since obviously the channel is
  8797. + running. */
  8798. + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
8799. + /* Update/restart the bulk eot timer since we just received a bulk eot
  8800. + interrupt. */
  8801. + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
  8802. +
  8803. + /* Check for finished bulk transfers on epids */
  8804. + check_finished_bulk_tx_epids(hcd, 0);
  8805. + }
  8806. + local_irq_restore(flags);
  8807. +
  8808. + DBFEXIT;
  8809. + return IRQ_HANDLED;
  8810. +}
  8811. +
  8812. +
  8813. +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
  8814. + struct usb_hcd *hcd = reg->hcd;
  8815. + struct crisv10_urb_priv *urb_priv;
  8816. + int epid;
  8817. + DBFENTER;
  8818. +
  8819. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  8820. + if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
  8821. + struct urb *urb;
  8822. + __u32 ept_data;
  8823. + int error_code;
  8824. +
  8825. + if (epid == DUMMY_EPID || epid == INVALID_EPID) {
  8826. + /* We definitely don't care about these ones. Besides, they are
  8827. + always disabled, so any possible disabling caused by the
  8828. + epid attention interrupt is irrelevant. */
  8829. + warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
  8830. + continue;
  8831. + }
  8832. +
  8833. + if(!epid_inuse(epid)) {
  8834. + irq_err("Epid attention on epid:%d that isn't in use\n", epid);
  8835. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  8836. + debug_epid(epid);
  8837. + continue;
  8838. + }
  8839. +
  8840. + /* Note that although there are separate R_USB_EPT_DATA and
  8841. + R_USB_EPT_DATA_ISO registers, they are located at the same address and
  8842. + are of the same size. In other words, this read should be ok for isoc
  8843. + also. */
  8844. + ept_data = etrax_epid_get(epid);
  8845. + error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
  8846. +
  8847. + /* Get the active URB for this epid. We blatantly assume
  8848. + that only this URB could have caused the epid attention. */
  8849. + urb = activeUrbList[epid];
  8850. + if (urb == NULL) {
  8851. + irq_err("Attention on epid:%d error:%d with no active URB.\n",
  8852. + epid, error_code);
  8853. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  8854. + debug_epid(epid);
  8855. + continue;
  8856. + }
  8857. +
  8858. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  8859. + ASSERT(urb_priv);
  8860. +
  8861. + /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
  8862. + if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
  8863. +
  8864. + /* Isoc traffic doesn't have error_count_in/error_count_out. */
  8865. + if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
  8866. + (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
  8867. + IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
8868. + /* Check if the URB is already marked for late-finish; we can get
8869. + several 3rd errors for Intr traffic when a device is unplugged */
  8870. + if(urb_priv->later_data == NULL) {
  8871. + /* 3rd error. */
  8872. + irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
  8873. + str_dir(urb->pipe), str_type(urb->pipe),
  8874. + (unsigned int)urb, urb_priv->urb_num);
  8875. +
  8876. + tc_finish_urb_later(hcd, urb, -EPROTO);
  8877. + }
  8878. +
  8879. + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
  8880. + irq_warn("Perror for epid:%d\n", epid);
  8881. + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
  8882. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  8883. + __dump_urb(urb);
  8884. + debug_epid(epid);
  8885. +
  8886. + if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
  8887. + /* invalid ep_id */
  8888. + panic("Perror because of invalid epid."
  8889. + " Deconfigured too early?");
  8890. + } else {
  8891. + /* past eof1, near eof, zout transfer, setup transfer */
  8892. + /* Dump the urb and the relevant EP descriptor. */
  8893. + panic("Something wrong with DMA descriptor contents."
  8894. + " Too much traffic inserted?");
  8895. + }
  8896. + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
  8897. + /* buffer ourun */
  8898. + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
  8899. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  8900. + __dump_urb(urb);
  8901. + debug_epid(epid);
  8902. +
  8903. + panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
  8904. + } else {
  8905. + irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
  8906. + str_dir(urb->pipe), str_type(urb->pipe));
  8907. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  8908. + __dump_urb(urb);
  8909. + debug_epid(epid);
  8910. + }
  8911. +
  8912. + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
  8913. + stall)) {
  8914. + /* Not really a protocol error, just says that the endpoint gave
  8915. + a stall response. Note that error_code cannot be stall for isoc. */
  8916. + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  8917. + panic("Isoc traffic cannot stall");
  8918. + }
  8919. +
  8920. + tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
  8921. + str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
  8922. + tc_finish_urb(hcd, urb, -EPIPE);
  8923. +
  8924. + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
  8925. + bus_error)) {
  8926. + /* Two devices responded to a transaction request. Must be resolved
  8927. + by software. FIXME: Reset ports? */
  8928. + panic("Bus error for epid %d."
  8929. + " Two devices responded to transaction request\n",
  8930. + epid);
  8931. +
  8932. + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
  8933. + buffer_error)) {
  8934. + /* DMA overrun or underrun. */
  8935. + irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
  8936. + str_dir(urb->pipe), str_type(urb->pipe));
  8937. +
  8938. + /* It seems that error_code = buffer_error in
  8939. + R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
  8940. + are the same error. */
  8941. + tc_finish_urb(hcd, urb, -EPROTO);
  8942. + } else {
  8943. + irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
  8944. + str_dir(urb->pipe), str_type(urb->pipe));
  8945. + dump_ept_data(epid);
  8946. + }
  8947. + }
  8948. + }
  8949. + DBFEXIT;
  8950. +}
  8951. +
  8952. +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
  8953. +{
  8954. + __u16 port_reg[USB_ROOT_HUB_PORTS];
  8955. + DBFENTER;
  8956. + port_reg[0] = reg->r_usb_rh_port_status_1;
  8957. + port_reg[1] = reg->r_usb_rh_port_status_2;
  8958. + rh_port_status_change(port_reg);
  8959. + DBFEXIT;
  8960. +}
  8961. +
  8962. +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
  8963. +{
  8964. + int epid;
  8965. + struct urb *urb;
  8966. + struct crisv10_urb_priv *urb_priv;
  8967. +
  8968. + DBFENTER;
  8969. +
  8970. + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
  8971. +
8972. + /* Only check epids that are in use, are valid and have an SB list */
  8973. + if (!epid_inuse(epid) || epid == INVALID_EPID ||
  8974. + TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
  8975. + /* Nothing here to see. */
  8976. + continue;
  8977. + }
  8978. + ASSERT(epid_isoc(epid));
  8979. +
  8980. + /* Get the active URB for this epid (if any). */
  8981. + urb = activeUrbList[epid];
  8982. + if (urb == 0) {
  8983. + isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
  8984. + continue;
  8985. + }
  8986. + if(!epid_out_traffic(epid)) {
  8987. + /* Sanity check. */
  8988. + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
  8989. +
  8990. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  8991. + ASSERT(urb_priv);
  8992. +
  8993. + if (urb_priv->urb_state == NOT_STARTED) {
  8994. + /* If ASAP is not set and urb->start_frame is the current frame,
  8995. + start the transfer. */
  8996. + if (!(urb->transfer_flags & URB_ISO_ASAP) &&
  8997. + (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
  8998. + /* EP should not be enabled if we're waiting for start_frame */
  8999. + ASSERT((TxIsocEPList[epid].command &
  9000. + IO_STATE(USB_EP_command, enable, yes)) == 0);
  9001. +
  9002. + isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
  9003. + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  9004. +
  9005. + /* This urb is now active. */
  9006. + urb_priv->urb_state = STARTED;
  9007. + continue;
  9008. + }
  9009. + }
  9010. + }
  9011. + }
  9012. +
  9013. + DBFEXIT;
  9014. +}
  9015. +
  9016. +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
  9017. +{
  9018. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
  9019. +
  9020. + DBFENTER;
  9021. + ASSERT(crisv10_hcd);
  9022. +
  9023. + irq_dbg("ctr_status_irq, controller status: %s\n",
  9024. + hcd_status_to_str(reg->r_usb_status));
  9025. +
  9026. + /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
  9027. + list for the corresponding epid? */
  9028. + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
  9029. + panic("USB controller got ourun.");
  9030. + }
  9031. + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
  9032. +
  9033. + /* Before, etrax_usb_do_intr_recover was called on this epid if it was
  9034. + an interrupt pipe. I don't see how re-enabling all EP descriptors
  9035. + will help if there was a programming error. */
  9036. + panic("USB controller got perror.");
  9037. + }
  9038. +
  9039. + /* Keep track of USB Controller, if it's running or not */
  9040. + if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
  9041. + crisv10_hcd->running = 1;
  9042. + } else {
  9043. + crisv10_hcd->running = 0;
  9044. + }
  9045. +
  9046. + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
  9047. + /* We should never operate in device mode. */
  9048. + panic("USB controller in device mode.");
  9049. + }
  9050. +
  9051. + /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
  9052. + using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
  9053. + set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
  9054. +
  9055. + DBFEXIT;
  9056. +}
  9057. +
  9058. +
  9059. +/******************************************************************/
  9060. +/* Host Controller interface functions */
  9061. +/******************************************************************/
  9062. +
  9063. +static inline void crisv10_ready_wait(void) {
  9064. + volatile int timeout = 10000;
  9065. + /* Check the busy bit of USB controller in Etrax */
  9066. + while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
  9067. + (timeout-- > 0));
9068. + if(timeout <= 0) {
  9069. + warn("Timeout while waiting for USB controller to be idle\n");
  9070. + }
  9071. +}
  9072. +
  9073. +/* reset host controller */
  9074. +static int crisv10_hcd_reset(struct usb_hcd *hcd)
  9075. +{
  9076. + DBFENTER;
  9077. + hcd_dbg(hcd, "reset\n");
  9078. +
  9079. +
  9080. + /* Reset the USB interface. */
  9081. + /*
  9082. + *R_USB_COMMAND =
  9083. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  9084. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  9085. + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  9086. + nop();
  9087. + */
  9088. + DBFEXIT;
  9089. + return 0;
  9090. +}
  9091. +
  9092. +/* start host controller */
  9093. +static int crisv10_hcd_start(struct usb_hcd *hcd)
  9094. +{
  9095. + DBFENTER;
  9096. + hcd_dbg(hcd, "start\n");
  9097. +
  9098. + crisv10_ready_wait();
  9099. +
  9100. + /* Start processing of USB traffic. */
  9101. + *R_USB_COMMAND =
  9102. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  9103. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  9104. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
  9105. +
  9106. + nop();
  9107. +
  9108. + hcd->state = HC_STATE_RUNNING;
  9109. +
  9110. + DBFEXIT;
  9111. + return 0;
  9112. +}
  9113. +
  9114. +/* stop host controller */
  9115. +static void crisv10_hcd_stop(struct usb_hcd *hcd)
  9116. +{
  9117. + DBFENTER;
  9118. + hcd_dbg(hcd, "stop\n");
  9119. + crisv10_hcd_reset(hcd);
  9120. + DBFEXIT;
  9121. +}
  9122. +
  9123. +/* return the current frame number */
  9124. +static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
  9125. +{
  9126. + DBFENTER;
  9127. + DBFEXIT;
  9128. + return (*R_USB_FM_NUMBER & 0x7ff);
  9129. +}
  9130. +
  9131. +#ifdef CONFIG_USB_OTG
  9132. +
  9133. +static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
  9134. +{
  9135. + return 0; /* no-op for now */
  9136. +}
  9137. +
  9138. +#endif /* CONFIG_USB_OTG */
  9139. +
  9140. +
  9141. +/******************************************************************/
  9142. +/* Root Hub functions */
  9143. +/******************************************************************/
  9144. +
  9145. +/* root hub status */
  9146. +static const struct usb_hub_status rh_hub_status =
  9147. + {
  9148. + .wHubStatus = 0,
  9149. + .wHubChange = 0,
  9150. + };
  9151. +
  9152. +/* root hub descriptor */
  9153. +static const u8 rh_hub_descr[] =
  9154. + {
  9155. + 0x09, /* bDescLength */
  9156. + 0x29, /* bDescriptorType */
  9157. + USB_ROOT_HUB_PORTS, /* bNbrPorts */
  9158. + 0x00, /* wHubCharacteristics */
  9159. + 0x00,
  9160. + 0x01, /* bPwrOn2pwrGood */
  9161. + 0x00, /* bHubContrCurrent */
  9162. + 0x00, /* DeviceRemovable */
  9163. + 0xff /* PortPwrCtrlMask */
  9164. + };
  9165. +
9166. +/* Actual holder of root hub status */
  9167. +struct crisv10_rh rh;
  9168. +
  9169. +/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
  9170. +int rh_init(void) {
  9171. + int i;
  9172. + /* Reset port status flags */
  9173. + for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
  9174. + rh.wPortChange[i] = 0;
  9175. + rh.wPortStatusPrev[i] = 0;
  9176. + }
  9177. + return 0;
  9178. +}
  9179. +
  9180. +#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
  9181. + (1<<USB_PORT_FEAT_ENABLE)|\
  9182. + (1<<USB_PORT_FEAT_SUSPEND)|\
  9183. + (1<<USB_PORT_FEAT_RESET))
  9184. +
  9185. +/* Handle port status change interrupt (called from bottom part interrupt) */
  9186. +void rh_port_status_change(__u16 port_reg[]) {
  9187. + int i;
  9188. + __u16 wChange;
  9189. +
  9190. + for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
  9191. + /* Xor out changes since last read, masked for important flags */
  9192. + wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
  9193. + /* Or changes together with (if any) saved changes */
  9194. + rh.wPortChange[i] |= wChange;
  9195. + /* Save new status */
  9196. + rh.wPortStatusPrev[i] = port_reg[i];
  9197. +
  9198. + if(wChange) {
  9199. + rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
  9200. + port_status_to_str(wChange),
  9201. + port_status_to_str(port_reg[i]));
  9202. + }
  9203. + }
  9204. +}
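A worked example (not part of the patch) of the change detection above, using the standard hub port feature bit numbers (connection=0, enable=1, suspend=2, reset=4, so RH_FEAT_MASK works out to 0x0017). If the previous masked status of a port was just "connected" and the new register value shows "connected" plus "enabled":

#include <stdio.h>

int main(void)
{
        unsigned short feat_mask = 0x0017;  /* connection|enable|suspend|reset */
        unsigned short prev      = 0x0001;  /* was: connected                  */
        unsigned short now       = 0x0003;  /* now: connected + enabled        */
        unsigned short change    = (now & feat_mask) ^ prev;

        /* Only the "enabled" bit differs, so 0x0002 is ORed into
           rh.wPortChange[] and 0x0003 is saved as wPortStatusPrev[]. */
        printf("wChange = 0x%04x\n", change);   /* prints: wChange = 0x0002 */
        return 0;
}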
  9205. +
  9206. +/* Construct port status change bitmap for the root hub */
  9207. +static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
  9208. +{
  9209. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  9210. + unsigned int i;
  9211. +
  9212. +// DBFENTER;
  9213. +
  9214. + /*
  9215. + * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
  9216. + * return bitmap indicating ports with status change
  9217. + */
  9218. + *buf = 0;
  9219. + spin_lock(&crisv10_hcd->lock);
  9220. + for (i = 1; i <= crisv10_hcd->num_ports; i++) {
  9221. + if (rh.wPortChange[map_port(i)]) {
  9222. + *buf |= (1 << i);
  9223. + rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
  9224. + port_status_to_str(rh.wPortChange[map_port(i)]),
  9225. + port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
  9226. + }
  9227. + }
  9228. + spin_unlock(&crisv10_hcd->lock);
  9229. +
  9230. +// DBFEXIT;
  9231. +
  9232. + return *buf == 0 ? 0 : 1;
  9233. +}
  9234. +
  9235. +/* Handle a control request for the root hub (called from hcd_driver) */
  9236. +static int rh_control_request(struct usb_hcd *hcd,
  9237. + u16 typeReq,
  9238. + u16 wValue,
  9239. + u16 wIndex,
  9240. + char *buf,
  9241. + u16 wLength) {
  9242. +
  9243. + struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  9244. + int retval = 0;
  9245. + int len;
  9246. + DBFENTER;
  9247. +
  9248. + switch (typeReq) {
  9249. + case GetHubDescriptor:
  9250. + rh_dbg("GetHubDescriptor\n");
  9251. + len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
  9252. + memcpy(buf, rh_hub_descr, len);
  9253. + buf[2] = crisv10_hcd->num_ports;
  9254. + break;
  9255. + case GetHubStatus:
  9256. + rh_dbg("GetHubStatus\n");
  9257. + len = min_t(unsigned int, sizeof rh_hub_status, wLength);
  9258. + memcpy(buf, &rh_hub_status, len);
  9259. + break;
  9260. + case GetPortStatus:
  9261. + if (!wIndex || wIndex > crisv10_hcd->num_ports)
  9262. + goto error;
  9263. + rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
  9264. + port_status_to_str(rh.wPortChange[map_port(wIndex)]),
  9265. + port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
  9266. + *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
  9267. + *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
  9268. + break;
  9269. + case SetHubFeature:
  9270. + rh_dbg("SetHubFeature\n");
  9271. + case ClearHubFeature:
  9272. + rh_dbg("ClearHubFeature\n");
  9273. + switch (wValue) {
  9274. + case C_HUB_OVER_CURRENT:
  9275. + case C_HUB_LOCAL_POWER:
  9276. + rh_warn("Not implemented hub request:%d \n", typeReq);
  9277. + /* not implemented */
  9278. + break;
  9279. + default:
  9280. + goto error;
  9281. + }
  9282. + break;
  9283. + case SetPortFeature:
  9284. + if (!wIndex || wIndex > crisv10_hcd->num_ports)
  9285. + goto error;
  9286. + if(rh_set_port_feature(map_port(wIndex), wValue))
  9287. + goto error;
  9288. + break;
  9289. + case ClearPortFeature:
  9290. + if (!wIndex || wIndex > crisv10_hcd->num_ports)
  9291. + goto error;
  9292. + if(rh_clear_port_feature(map_port(wIndex), wValue))
  9293. + goto error;
  9294. + break;
  9295. + default:
  9296. + rh_warn("Unknown hub request: %d\n", typeReq);
  9297. + error:
  9298. + retval = -EPIPE;
  9299. + }
  9300. + DBFEXIT;
  9301. + return retval;
  9302. +}
  9303. +
  9304. +int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
  9305. + __u8 bUsbCommand = 0;
  9306. + switch(wFeature) {
  9307. + case USB_PORT_FEAT_RESET:
  9308. + rh_dbg("SetPortFeature: reset\n");
  9309. + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
  9310. + goto set;
  9311. + break;
  9312. + case USB_PORT_FEAT_SUSPEND:
  9313. + rh_dbg("SetPortFeature: suspend\n");
  9314. + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
  9315. + goto set;
  9316. + break;
  9317. + case USB_PORT_FEAT_POWER:
  9318. + rh_dbg("SetPortFeature: power\n");
  9319. + break;
  9320. + case USB_PORT_FEAT_C_CONNECTION:
  9321. + rh_dbg("SetPortFeature: c_connection\n");
  9322. + break;
  9323. + case USB_PORT_FEAT_C_RESET:
  9324. + rh_dbg("SetPortFeature: c_reset\n");
  9325. + break;
  9326. + case USB_PORT_FEAT_C_OVER_CURRENT:
  9327. + rh_dbg("SetPortFeature: c_over_current\n");
  9328. + break;
  9329. +
  9330. + set:
  9331. + /* Select which port via the port_sel field */
  9332. + bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
  9333. +
  9334. + /* Make sure the controller isn't busy. */
  9335. + crisv10_ready_wait();
  9336. + /* Send out the actual command to the USB controller */
  9337. + *R_USB_COMMAND = bUsbCommand;
  9338. +
  9339. + /* If port reset then also bring USB controller into running state */
  9340. + if(wFeature == USB_PORT_FEAT_RESET) {
  9341. + /* Wait a while for controller to first become started after port reset */
  9342. + udelay(12000); /* 12ms blocking wait */
  9343. +
  9344. + /* Make sure the controller isn't busy. */
  9345. + crisv10_ready_wait();
  9346. +
  9347. + /* If all enabled ports were disabled the host controller goes down into
  9348. + started mode, so we need to bring it back into the running state.
  9349. + (This is safe even if it's already in the running state.) */
  9350. + *R_USB_COMMAND =
  9351. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  9352. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  9353. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
  9354. + }
  9355. +
  9356. + break;
  9357. + default:
  9358. + rh_dbg("SetPortFeature: unknown feature\n");
  9359. + return -1;
  9360. + }
  9361. + return 0;
  9362. +}
  9363. +
  9364. +int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
  9365. + switch(wFeature) {
  9366. + case USB_PORT_FEAT_ENABLE:
  9367. + rh_dbg("ClearPortFeature: enable\n");
  9368. + rh_disable_port(bPort);
  9369. + break;
  9370. + case USB_PORT_FEAT_SUSPEND:
  9371. + rh_dbg("ClearPortFeature: suspend\n");
  9372. + break;
  9373. + case USB_PORT_FEAT_POWER:
  9374. + rh_dbg("ClearPortFeature: power\n");
  9375. + break;
  9376. +
  9377. + case USB_PORT_FEAT_C_ENABLE:
  9378. + rh_dbg("ClearPortFeature: c_enable\n");
  9379. + goto clear;
  9380. + case USB_PORT_FEAT_C_SUSPEND:
  9381. + rh_dbg("ClearPortFeature: c_suspend\n");
  9382. + goto clear;
  9383. + case USB_PORT_FEAT_C_CONNECTION:
  9384. + rh_dbg("ClearPortFeature: c_connection\n");
  9385. + goto clear;
  9386. + case USB_PORT_FEAT_C_OVER_CURRENT:
  9387. + rh_dbg("ClearPortFeature: c_over_current\n");
  9388. + goto clear;
  9389. + case USB_PORT_FEAT_C_RESET:
  9390. + rh_dbg("ClearPortFeature: c_reset\n");
  9391. + goto clear;
  9392. + clear:
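+ /* The USB_PORT_FEAT_C_* feature selectors start at 16, so (wFeature - 16)
+ is the corresponding bit position in the wPortChange bitmap. */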
  9393. + rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
  9394. + break;
  9395. + default:
  9396. + rh_dbg("ClearPortFeature: unknown feature\n");
  9397. + return -1;
  9398. + }
  9399. + return 0;
  9400. +}
  9401. +
  9402. +
  9403. +#ifdef CONFIG_PM
  9404. +/* Handle a suspend request for the root hub (called from hcd_driver) */
  9405. +static int rh_suspend_request(struct usb_hcd *hcd)
  9406. +{
  9407. + return 0; /* no-op for now */
  9408. +}
  9409. +
  9410. +/* Handle a resume request for the root hub (called from hcd_driver) */
  9411. +static int rh_resume_request(struct usb_hcd *hcd)
  9412. +{
  9413. + return 0; /* no-op for now */
  9414. +}
  9415. +#endif /* CONFIG_PM */
  9416. +
  9417. +
  9418. +
+/* Wrapper function for the port-disable workaround registers in the USB controller */
  9420. +static void rh_disable_port(unsigned int port) {
  9421. + volatile int timeout = 10000;
  9422. + volatile char* usb_portx_disable;
  9423. + switch(port) {
  9424. + case 0:
  9425. + usb_portx_disable = R_USB_PORT1_DISABLE;
  9426. + break;
  9427. + case 1:
  9428. + usb_portx_disable = R_USB_PORT2_DISABLE;
  9429. + break;
  9430. + default:
  9431. + /* Invalid port index */
  9432. + return;
  9433. + }
  9434. + /* Set disable flag in special register */
  9435. + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  9436. + /* Wait until not enabled anymore */
  9437. + while((rh.wPortStatusPrev[port] &
  9438. + IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
  9439. + (timeout-- > 0));
+ if(timeout < 0) {
  9441. + warn("Timeout while waiting for port %d to become disabled\n", port);
  9442. + }
  9443. + /* clear disable flag in special register */
  9444. + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  9445. + rh_info("Physical port %d disabled\n", port+1);
  9446. +}
  9447. +
  9448. +
  9449. +/******************************************************************/
  9450. +/* Transfer Controller (TC) functions */
  9451. +/******************************************************************/
  9452. +
  9453. +/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
  9454. + dynamically?
  9455. + To adjust it dynamically we would have to get an interrupt when we reach
  9456. + the end of the rx descriptor list, or when we get close to the end, and
  9457. + then allocate more descriptors. */
  9458. +#define NBR_OF_RX_DESC 512
  9459. +#define RX_DESC_BUF_SIZE 1024
  9460. +#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
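+/* With the values above the Rx ring holds 512 descriptors of 1 KiB each,
+ i.e. 512 KiB of statically allocated receive buffer space. */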
  9461. +
  9462. +
  9463. +/* Local variables for Transfer Controller */
  9464. +/* --------------------------------------- */
  9465. +
  9466. +/* This is a circular (double-linked) list of the active urbs for each epid.
  9467. + The head is never removed, and new urbs are linked onto the list as
  9468. + urb_entry_t elements. Don't reference urb_list directly; use the wrapper
+ functions instead (which include spin_locks) */
  9470. +static struct list_head urb_list[NBR_OF_EPIDS];
  9471. +
  9472. +/* Read about the need and usage of this lock in submit_ctrl_urb. */
  9473. +/* Lock for URB lists for each EPID */
  9474. +static spinlock_t urb_list_lock;
  9475. +
  9476. +/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
  9477. +static spinlock_t etrax_epid_lock;
  9478. +
  9479. +/* Lock for dma8 sub0 handling */
  9480. +static spinlock_t etrax_dma8_sub0_lock;
  9481. +
  9482. +/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
+ Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be
  9484. + cache aligned. */
  9485. +static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
  9486. +static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
  9487. +
  9488. +/* Pointers into RxDescList. */
  9489. +static volatile struct USB_IN_Desc *myNextRxDesc;
  9490. +static volatile struct USB_IN_Desc *myLastRxDesc;
  9491. +
  9492. +/* A zout transfer makes a memory access at the address of its buf pointer,
  9493. + which means that setting this buf pointer to 0 will cause an access to the
  9494. + flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
  9495. + (depending on DMA burst size) transfer.
  9496. + Instead, we set it to 1, and point it to this buffer. */
  9497. +static int zout_buffer[4] __attribute__ ((aligned (4)));
  9498. +
  9499. +/* Cache for allocating new EP and SB descriptors. */
  9500. +//static kmem_cache_t *usb_desc_cache;
  9501. +static struct kmem_cache *usb_desc_cache;
  9502. +
  9503. +/* Cache for the data allocated in the isoc descr top half. */
  9504. +//static kmem_cache_t *isoc_compl_cache;
  9505. +static struct kmem_cache *isoc_compl_cache;
  9506. +
  9507. +/* Cache for the data allocated when delayed finishing of URBs */
  9508. +//static kmem_cache_t *later_data_cache;
  9509. +static struct kmem_cache *later_data_cache;
  9510. +
+/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
+ and disable the iso_eof interrupt. We only need this interrupt when we have
+ Isoc data endpoints (it consumes CPU cycles).
+ FIXME: This could be more fine-grained, so that the interrupt is only
+ enabled when an In Isoc URB without the URB_ISO_ASAP flag is queued. */
  9516. +static int isoc_epid_counter;
  9517. +
  9518. +/* Protecting wrapper functions for R_USB_EPT_x */
  9519. +/* -------------------------------------------- */
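+/* R_USB_EPT_DATA is accessed indirectly: write the epid into R_USB_EPT_INDEX,
+ issue a nop() (presumably to let the index register take effect) and then
+ read/write R_USB_EPT_DATA. The whole sequence is done under etrax_epid_lock
+ so that concurrent accesses cannot clobber the index. */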
  9520. +static inline void etrax_epid_set(__u8 index, __u32 data) {
  9521. + unsigned long flags;
  9522. + spin_lock_irqsave(&etrax_epid_lock, flags);
  9523. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  9524. + nop();
  9525. + *R_USB_EPT_DATA = data;
  9526. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  9527. +}
  9528. +
  9529. +static inline void etrax_epid_clear_error(__u8 index) {
  9530. + unsigned long flags;
  9531. + spin_lock_irqsave(&etrax_epid_lock, flags);
  9532. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  9533. + nop();
  9534. + *R_USB_EPT_DATA &=
  9535. + ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
  9536. + IO_MASK(R_USB_EPT_DATA, error_count_out) |
  9537. + IO_MASK(R_USB_EPT_DATA, error_code));
  9538. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  9539. +}
  9540. +
  9541. +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
  9542. + __u8 toggle) {
  9543. + unsigned long flags;
  9544. + spin_lock_irqsave(&etrax_epid_lock, flags);
  9545. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  9546. + nop();
  9547. + if(dirout) {
  9548. + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
  9549. + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
  9550. + } else {
  9551. + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
  9552. + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
  9553. + }
  9554. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  9555. +}
  9556. +
  9557. +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
  9558. + unsigned long flags;
  9559. + __u8 toggle;
  9560. + spin_lock_irqsave(&etrax_epid_lock, flags);
  9561. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  9562. + nop();
  9563. + if (dirout) {
  9564. + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
  9565. + } else {
  9566. + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
  9567. + }
  9568. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  9569. + return toggle;
  9570. +}
  9571. +
  9572. +
  9573. +static inline __u32 etrax_epid_get(__u8 index) {
  9574. + unsigned long flags;
  9575. + __u32 data;
  9576. + spin_lock_irqsave(&etrax_epid_lock, flags);
  9577. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  9578. + nop();
  9579. + data = *R_USB_EPT_DATA;
  9580. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  9581. + return data;
  9582. +}
  9583. +
  9584. +
  9585. +
  9586. +
  9587. +/* Main functions for Transfer Controller */
  9588. +/* -------------------------------------- */
  9589. +
  9590. +/* Init structs, memories and lists used by Transfer Controller */
  9591. +int tc_init(struct usb_hcd *hcd) {
  9592. + int i;
  9593. + /* Clear software state info for all epids */
  9594. + memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
  9595. +
  9596. + /* Set Invalid and Dummy as being in use and disabled */
  9597. + epid_state[INVALID_EPID].inuse = 1;
  9598. + epid_state[DUMMY_EPID].inuse = 1;
  9599. + epid_state[INVALID_EPID].disabled = 1;
  9600. + epid_state[DUMMY_EPID].disabled = 1;
  9601. +
+ /* Clear counter for how many Isoc epids we have set up */
  9603. + isoc_epid_counter = 0;
  9604. +
  9605. + /* Initialize the urb list by initiating a head for each list.
+ Also reset list holding active URB for each epid */
  9607. + for (i = 0; i < NBR_OF_EPIDS; i++) {
  9608. + INIT_LIST_HEAD(&urb_list[i]);
  9609. + activeUrbList[i] = NULL;
  9610. + }
  9611. +
  9612. + /* Init lock for URB lists */
  9613. + spin_lock_init(&urb_list_lock);
  9614. + /* Init lock for Etrax R_USB_EPT register */
  9615. + spin_lock_init(&etrax_epid_lock);
  9616. + /* Init lock for Etrax dma8 sub0 handling */
  9617. + spin_lock_init(&etrax_dma8_sub0_lock);
  9618. +
  9619. + /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
  9620. +
  9621. + /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
  9622. + allocate SB descriptors from this cache. This is ok since
  9623. + sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
  9624. +// usb_desc_cache = kmem_cache_create("usb_desc_cache",
  9625. +// sizeof(struct USB_EP_Desc), 0,
  9626. +// SLAB_HWCACHE_ALIGN, 0, 0);
  9627. + usb_desc_cache = kmem_cache_create(
  9628. + "usb_desc_cache",
  9629. + sizeof(struct USB_EP_Desc),
  9630. + 0,
  9631. + SLAB_HWCACHE_ALIGN,
  9632. + NULL);
  9633. + if(usb_desc_cache == NULL) {
  9634. + return -ENOMEM;
  9635. + }
  9636. +
  9637. + /* Create slab cache for speedy allocation of memory for isoc bottom-half
  9638. + interrupt handling */
  9639. +// isoc_compl_cache =
  9640. +// kmem_cache_create("isoc_compl_cache",
  9641. +// sizeof(struct crisv10_isoc_complete_data),
  9642. +// 0, SLAB_HWCACHE_ALIGN, 0, 0);
  9643. + isoc_compl_cache = kmem_cache_create(
  9644. + "isoc_compl_cache",
  9645. + sizeof(struct crisv10_isoc_complete_data),
  9646. + 0,
  9647. + SLAB_HWCACHE_ALIGN,
  9648. + NULL
  9649. + );
  9650. +
  9651. + if(isoc_compl_cache == NULL) {
  9652. + return -ENOMEM;
  9653. + }
  9654. +
  9655. + /* Create slab cache for speedy allocation of memory for later URB finish
  9656. + struct */
  9657. +// later_data_cache =
  9658. +// kmem_cache_create("later_data_cache",
  9659. +// sizeof(struct urb_later_data),
  9660. +// 0, SLAB_HWCACHE_ALIGN, 0, 0);
  9661. +
  9662. + later_data_cache = kmem_cache_create(
  9663. + "later_data_cache",
  9664. + sizeof(struct urb_later_data),
  9665. + 0,
  9666. + SLAB_HWCACHE_ALIGN,
  9667. + NULL
  9668. + );
  9669. +
  9670. + if(later_data_cache == NULL) {
  9671. + return -ENOMEM;
  9672. + }
  9673. +
  9674. +
  9675. + /* Initiate the bulk start timer. */
  9676. + init_timer(&bulk_start_timer);
  9677. + bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
  9678. + bulk_start_timer.function = tc_bulk_start_timer_func;
  9679. + add_timer(&bulk_start_timer);
  9680. +
  9681. +
  9682. + /* Initiate the bulk eot timer. */
  9683. + init_timer(&bulk_eot_timer);
  9684. + bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
  9685. + bulk_eot_timer.function = tc_bulk_eot_timer_func;
  9686. + bulk_eot_timer.data = (unsigned long)hcd;
  9687. + add_timer(&bulk_eot_timer);
  9688. +
  9689. + return 0;
  9690. +}
  9691. +
  9692. +/* Uninitialize all resources used by Transfer Controller */
  9693. +void tc_destroy(void) {
  9694. +
  9695. + /* Destroy all slab cache */
  9696. + kmem_cache_destroy(usb_desc_cache);
  9697. + kmem_cache_destroy(isoc_compl_cache);
  9698. + kmem_cache_destroy(later_data_cache);
  9699. +
  9700. + /* Remove timers */
  9701. + del_timer(&bulk_start_timer);
  9702. + del_timer(&bulk_eot_timer);
  9703. +}
  9704. +
  9705. +static void restart_dma8_sub0(void) {
  9706. + unsigned long flags;
  9707. + spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
  9708. + /* Verify that the dma is not running */
  9709. + if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
  9710. + struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
  9711. + while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
  9712. + ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
  9713. + }
+ /* Advance the DMA to the first EP descriptor that is not a DUMMY_EPID.
+ * ep is a virtual pointer here, so it must be converted with virt_to_phys
+ * (ep->next, in contrast, already holds a physical address); see
+ * http://mhonarc.axis.se/dev-etrax/msg08630.html
+ */
  9718. + //*R_DMA_CH8_SUB0_EP = ep->next;
  9719. + *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
  9720. + /* Restart the DMA */
  9721. + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
  9722. + }
  9723. + spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
  9724. +}
  9725. +
  9726. +/* queue an URB with the transfer controller (called from hcd_driver) */
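+/* Note: this uses the newer HCD enqueue API where the target endpoint is
+ taken from urb->ep and the URB is linked to it with usb_hcd_link_urb_to_ep();
+ the old (ep, urb) form is kept commented out below for reference. */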
  9727. +//static int tc_urb_enqueue(struct usb_hcd *hcd,
  9728. +// struct usb_host_endpoint *ep,
  9729. +// struct urb *urb,
  9730. +// gfp_t mem_flags) {
  9731. +static int tc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
  9732. +{
  9733. + int epid;
  9734. + int retval;
  9735. +// int bustime = 0;
  9736. + int maxpacket;
  9737. + unsigned long flags;
  9738. + struct crisv10_urb_priv *urb_priv;
  9739. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  9740. + DBFENTER;
  9741. +
  9742. + if(!(crisv10_hcd->running)) {
  9743. + /* The USB Controller is not running, probably because no device is
+ attached. No point in enqueueing URBs then */
  9745. + tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
  9746. + (unsigned int)urb);
  9747. + return -ENOENT;
  9748. + }
  9749. +
  9750. + maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  9751. +
  9752. + /* hinko ignore usb_pipeisoc */
  9753. +#if 0
  9754. + /* Special case check for In Isoc transfers. Specification states that each
  9755. + In Isoc transfer consists of one packet and therefore it should fit into
  9756. + the transfer-buffer of an URB.
  9757. + We do the check here to be sure (an invalid scenario can be produced with
  9758. + parameters to the usbtest suite) */
  9759. + if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
  9760. + (urb->transfer_buffer_length < maxpacket)) {
  9761. + tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
  9762. + return -EMSGSIZE;
  9763. + }
  9764. +
  9765. + /* Check if there is enough bandwidth for periodic transfer */
  9766. + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
  9767. + /* only check (and later claim) if not already claimed */
  9768. + if (urb->bandwidth == 0) {
  9769. + bustime = usb_check_bandwidth(urb->dev, urb);
  9770. + if (bustime < 0) {
  9771. + tc_err("Not enough periodic bandwidth\n");
  9772. + return -ENOSPC;
  9773. + }
  9774. + }
  9775. + }
  9776. +#endif
  9777. +
+ /* Check if there is an epid for the URB's destination; if not, this
+ function sets one up. */
  9780. + //epid = tc_setup_epid(ep, urb, mem_flags);
  9781. + epid = tc_setup_epid(urb, mem_flags);
  9782. + if (epid < 0) {
  9783. + tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
  9784. + DBFEXIT;
  9785. + return -ENOMEM;
  9786. + }
  9787. +
  9788. + if(urb == activeUrbList[epid]) {
+ tc_err("Resubmission of already active URB:0x%x\n", (unsigned int)urb);
  9790. + return -ENXIO;
  9791. + }
  9792. +
  9793. + if(urb_list_entry(urb, epid)) {
+ tc_err("Resubmission of already queued URB:0x%x\n", (unsigned int)urb);
  9795. + return -ENXIO;
  9796. + }
  9797. +
+ /* If we have actively flagged the endpoint as disabled then refuse submission */
  9799. + if(epid_state[epid].disabled) {
  9800. + return -ENOENT;
  9801. + }
  9802. +
  9803. + /* Allocate and init HC-private data for URB */
  9804. + if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
  9805. + DBFEXIT;
  9806. + return -ENOMEM;
  9807. + }
  9808. + urb_priv = urb->hcpriv;
  9809. +
  9810. + tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
  9811. + (unsigned int)urb, urb_priv->urb_num, epid,
  9812. + pipe_to_str(urb->pipe), urb->transfer_buffer_length);
  9813. +
  9814. + /* Create and link SBs required for this URB */
  9815. + retval = create_sb_for_urb(urb, mem_flags);
  9816. + if(retval != 0) {
  9817. + tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
  9818. + urb_priv->urb_num);
  9819. + urb_priv_free(hcd, urb);
  9820. + DBFEXIT;
  9821. + return retval;
  9822. + }
  9823. +
+ /* Init intr EP pool if this URB is an INTR transfer. This pool is later
+ used when inserting EPs in the TxIntrEPList. We do the alloc here
+ so we won't run out of memory later */
  9827. + if(usb_pipeint(urb->pipe)) {
  9828. + retval = init_intr_urb(urb, mem_flags);
  9829. + if(retval != 0) {
  9830. + tc_warn("Failed to init Intr URB\n");
  9831. + urb_priv_free(hcd, urb);
  9832. + DBFEXIT;
  9833. + return retval;
  9834. + }
  9835. + }
  9836. +
  9837. + /* Disable other access when inserting USB */
  9838. +
+ /* BUG: sleeping with interrupts disabled if using local_irq_save/local_irq_restore
+ * here - because urb_list_add() and tc_dma_process_queue() save irqs again !??!
  9841. + */
  9842. +// local_irq_save(flags);
  9843. +
  9844. + /* hinko ignore usb_pipeisoc */
  9845. +#if 0
  9846. + /* Claim bandwidth, if needed */
  9847. + if(bustime) {
  9848. + usb_claim_bandwidth(urb->dev, urb, bustime, 0);
  9849. + }
  9850. +
  9851. + /* Add URB to EP queue */
  9852. + urb_list_add(urb, epid, mem_flags);
  9853. +
  9854. + if(usb_pipeisoc(urb->pipe)) {
  9855. + /* Special processing of Isoc URBs. */
  9856. + tc_dma_process_isoc_urb(urb);
  9857. + } else {
  9858. + /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
  9859. + tc_dma_process_queue(epid);
  9860. + }
  9861. +#endif
  9862. + /* Add URB to EP queue */
  9863. + urb_list_add(urb, epid, mem_flags);
  9864. +
  9865. + /*hinko link/unlink urb -> ep */
  9866. + spin_lock_irqsave(&crisv10_hcd->lock, flags);
  9867. + //spin_lock(&crisv10_hcd->lock);
  9868. + retval = usb_hcd_link_urb_to_ep(hcd, urb);
  9869. + if (retval) {
  9870. + spin_unlock_irqrestore(&crisv10_hcd->lock, flags);
  9871. + tc_warn("Failed to link urb to ep\n");
  9872. + urb_priv_free(hcd, urb);
  9873. + DBFEXIT;
  9874. + return retval;
  9875. + }
  9876. + spin_unlock_irqrestore(&crisv10_hcd->lock, flags);
  9877. + //spin_unlock(&crisv10_hcd->lock);
  9878. +
  9879. + /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
  9880. + tc_dma_process_queue(epid);
  9881. +
  9882. +// local_irq_restore(flags);
  9883. +
  9884. + DBFEXIT;
  9885. + return 0;
  9886. +}
  9887. +
  9888. +/* remove an URB from the transfer controller queues (called from hcd_driver)*/
  9889. +//static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
  9890. +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  9891. +{
  9892. + struct crisv10_urb_priv *urb_priv;
  9893. + unsigned long flags;
  9894. + int epid;
  9895. +
  9896. + DBFENTER;
  9897. + /* Disable interrupts here since a descriptor interrupt for the isoc epid
+ will modify the sb list. This could possibly be done with finer granularity, but
  9899. + urb_dequeue should not be used frequently anyway.
  9900. + */
  9901. + local_irq_save(flags);
  9902. +
  9903. + urb_priv = urb->hcpriv;
  9904. +
  9905. + if (!urb_priv) {
  9906. + /* This happens if a device driver calls unlink on an urb that
  9907. + was never submitted (lazy driver) or if the urb was completed
  9908. + while dequeue was being called. */
+ tc_warn("Dequeuing of not enqueued URB:0x%x\n", (unsigned int)urb);
  9910. + local_irq_restore(flags);
  9911. + return 0;
  9912. + }
  9913. + epid = urb_priv->epid;
  9914. +
+ tc_warn("Dequeuing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
  9916. + (urb == activeUrbList[epid]) ? "active" : "queued",
  9917. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  9918. + str_type(urb->pipe), epid, urb->status,
  9919. + (urb_priv->later_data) ? "later-sched" : "");
  9920. +
+ /* For Bulk, Ctrl and Intr only one URB is active at a time. So any URB
+ that isn't active can be dequeued by just removing it from the queue */
  9923. + if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
  9924. + usb_pipeint(urb->pipe)) {
  9925. +
+ /* Check if the URB hasn't gone further than the queue */
  9927. + if(urb != activeUrbList[epid]) {
  9928. + ASSERT(urb_priv->later_data == NULL);
+ tc_warn("Dequeuing URB:0x%x[%d] (%s %s epid:%d) from queue"
  9930. + " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
  9931. + str_dir(urb->pipe), str_type(urb->pipe), epid);
  9932. +
  9933. + /* Finish the URB with error status from USB core */
  9934. + tc_finish_urb(hcd, urb, urb->status);
  9935. + local_irq_restore(flags);
  9936. + return 0;
  9937. + }
  9938. + }
  9939. +
  9940. + /* Set URB status to Unlink for handling when interrupt comes. */
  9941. + urb_priv->urb_state = UNLINK;
  9942. +
+ /* Differentiate dequeuing of Bulk and Ctrl from Isoc and Intr */
  9944. + switch(usb_pipetype(urb->pipe)) {
  9945. + case PIPE_BULK:
  9946. + /* Check if EP still is enabled */
  9947. + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  9948. + /* The EP was enabled, disable it. */
  9949. + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  9950. + }
  9951. + /* Kicking dummy list out of the party. */
  9952. + TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
  9953. + break;
  9954. + case PIPE_CONTROL:
  9955. + /* Check if EP still is enabled */
  9956. + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  9957. + /* The EP was enabled, disable it. */
  9958. + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  9959. + }
  9960. + break;
  9961. + case PIPE_ISOCHRONOUS:
+ /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
+ finish_isoc_urb(), because there might be a case where the URB is
+ dequeued while other valid URBs are still waiting */
  9965. +
  9966. + /* Check if In Isoc EP still is enabled */
  9967. + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  9968. + /* The EP was enabled, disable it. */
  9969. + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  9970. + }
  9971. + break;
  9972. + case PIPE_INTERRUPT:
  9973. + /* Special care is taken for interrupt URBs. EPs are unlinked in
  9974. + tc_finish_urb */
  9975. + break;
  9976. + default:
  9977. + break;
  9978. + }
  9979. +
+ /* Asynchronous unlink; finish the URB later from scheduled work or another
+ event (data finished, error) */
  9982. + tc_finish_urb_later(hcd, urb, urb->status);
  9983. +
  9984. + local_irq_restore(flags);
  9985. + DBFEXIT;
  9986. + return 0;
  9987. +}
  9988. +
  9989. +
  9990. +static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
  9991. + volatile int timeout = 10000;
  9992. + struct urb* urb;
  9993. + struct crisv10_urb_priv* urb_priv;
  9994. + unsigned long flags;
  9995. +
  9996. + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  9997. + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  9998. + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
  9999. +
  10000. + int type = epid_state[epid].type;
  10001. +
+ /* Setting this flag will cause enqueue() to return -ENOENT for new
+ submissions on this endpoint and finish_urb() won't process the queue further */
  10004. + epid_state[epid].disabled = 1;
  10005. +
  10006. + switch(type) {
  10007. + case PIPE_BULK:
  10008. + /* Check if EP still is enabled */
  10009. + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  10010. + /* The EP was enabled, disable it. */
  10011. + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  10012. + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
  10013. +
  10014. + /* Do busy-wait until DMA not using this EP descriptor anymore */
  10015. + while((*R_DMA_CH8_SUB0_EP ==
  10016. + virt_to_phys(&TxBulkEPList[epid])) &&
  10017. + (timeout-- > 0));
+ if(timeout < 0) {
  10019. + warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
  10020. + " epid:%d\n", epid);
  10021. + }
  10022. + }
  10023. + break;
  10024. +
  10025. + case PIPE_CONTROL:
  10026. + /* Check if EP still is enabled */
  10027. + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  10028. + /* The EP was enabled, disable it. */
  10029. + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  10030. + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
  10031. +
  10032. + /* Do busy-wait until DMA not using this EP descriptor anymore */
  10033. + while((*R_DMA_CH8_SUB1_EP ==
  10034. + virt_to_phys(&TxCtrlEPList[epid])) &&
  10035. + (timeout-- > 0));
+ if(timeout < 0) {
  10037. + warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
  10038. + " epid:%d\n", epid);
  10039. + }
  10040. + }
  10041. + break;
  10042. +
  10043. + case PIPE_INTERRUPT:
  10044. + local_irq_save(flags);
  10045. + /* Disable all Intr EPs belonging to epid */
  10046. + first_ep = &TxIntrEPList[0];
  10047. + curr_ep = first_ep;
  10048. + do {
  10049. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  10050. + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
  10051. + /* Disable EP */
  10052. + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
  10053. + }
  10054. + curr_ep = phys_to_virt(curr_ep->next);
  10055. + } while (curr_ep != first_ep);
  10056. +
  10057. + local_irq_restore(flags);
  10058. + break;
  10059. +
  10060. + case PIPE_ISOCHRONOUS:
  10061. + /* Check if EP still is enabled */
  10062. + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  10063. + tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
  10064. + /* The EP was enabled, disable it. */
  10065. + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  10066. +
  10067. + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
  10068. + (timeout-- > 0));
+ if(timeout < 0) {
  10070. + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
  10071. + " epid:%d\n", epid);
  10072. + }
  10073. + }
  10074. + break;
  10075. + }
  10076. +
  10077. + local_irq_save(flags);
  10078. +
  10079. + /* Finish if there is active URB for this endpoint */
  10080. + if(activeUrbList[epid] != NULL) {
  10081. + urb = activeUrbList[epid];
  10082. + urb_priv = urb->hcpriv;
  10083. + ASSERT(urb_priv);
  10084. + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
  10085. + (urb == activeUrbList[epid]) ? "active" : "queued",
  10086. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  10087. + str_type(urb->pipe), epid, urb->status,
  10088. + (urb_priv->later_data) ? "later-sched" : "");
  10089. +
  10090. + tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
  10091. + ASSERT(activeUrbList[epid] == NULL);
  10092. + }
  10093. +
+ /* Finish any queued URBs for this endpoint. There won't be any resubmissions
+ because epid_disabled causes enqueue() to fail for this endpoint */
  10096. + while((urb = urb_list_first(epid)) != NULL) {
  10097. + urb_priv = urb->hcpriv;
  10098. + ASSERT(urb_priv);
  10099. +
  10100. + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
  10101. + (urb == activeUrbList[epid]) ? "active" : "queued",
  10102. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  10103. + str_type(urb->pipe), epid, urb->status,
  10104. + (urb_priv->later_data) ? "later-sched" : "");
  10105. +
  10106. + tc_finish_urb(hcd, urb, -ENOENT);
  10107. + }
  10108. + epid_state[epid].disabled = 0;
  10109. + local_irq_restore(flags);
  10110. +}
  10111. +
  10112. +/* free resources associated with an endpoint (called from hcd_driver) */
  10113. +static void tc_endpoint_disable(struct usb_hcd *hcd,
  10114. + struct usb_host_endpoint *ep) {
  10115. + DBFENTER;
  10116. + /* Only free epid if it has been allocated. We get two endpoint_disable
  10117. + requests for ctrl endpoints so ignore the second one */
  10118. + if(ep->hcpriv != NULL) {
  10119. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  10120. + int epid = ep_priv->epid;
  10121. + tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
  10122. + (unsigned int)ep, (unsigned int)ep->hcpriv,
  10123. + endpoint_to_str(&(ep->desc)), epid);
  10124. +
  10125. + tc_sync_finish_epid(hcd, epid);
  10126. +
  10127. + ASSERT(activeUrbList[epid] == NULL);
  10128. + ASSERT(list_empty(&urb_list[epid]));
  10129. +
  10130. + tc_free_epid(ep);
  10131. + } else {
  10132. + tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
  10133. + (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
  10134. + }
  10135. + DBFEXIT;
  10136. +}
  10137. +
  10138. +//static void tc_finish_urb_later_proc(void *data) {
  10139. +static void tc_finish_urb_later_proc(struct work_struct *work) {
  10140. + unsigned long flags;
  10141. + //struct urb_later_data* uld = (struct urb_later_data*)data;
  10142. + struct urb_later_data* uld = container_of(work, struct urb_later_data, ws.work);
  10143. + local_irq_save(flags);
  10144. + if(uld->urb == NULL) {
+ late_dbg("Later finish of URB = NULL (already finished)\n");
  10146. + } else {
  10147. + struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
  10148. + ASSERT(urb_priv);
  10149. + if(urb_priv->urb_num == uld->urb_num) {
  10150. + late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
  10151. + urb_priv->urb_num);
  10152. + if(uld->status != uld->urb->status) {
  10153. + errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
  10154. + uld->urb->status, uld->status);
  10155. + }
  10156. + if(uld != urb_priv->later_data) {
  10157. + panic("Scheduled uld not same as URBs uld\n");
  10158. + }
  10159. + tc_finish_urb(uld->hcd, uld->urb, uld->status);
  10160. + } else {
  10161. + late_warn("Ignoring later finish of URB:0x%x[%d]"
  10162. + ", urb_num doesn't match current URB:0x%x[%d]",
  10163. + (unsigned int)(uld->urb), uld->urb_num,
  10164. + (unsigned int)(uld->urb), urb_priv->urb_num);
  10165. + }
  10166. + }
  10167. + local_irq_restore(flags);
  10168. + kmem_cache_free(later_data_cache, uld);
  10169. +}
  10170. +
  10171. +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
  10172. + int status) {
  10173. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10174. + struct urb_later_data* uld;
  10175. +
  10176. + ASSERT(urb_priv);
  10177. +
  10178. + if(urb_priv->later_data != NULL) {
+ /* Later-finish already scheduled for this URB, just update the status to
+ return when finishing later */
  10181. + errno_dbg("Later-finish schedule change URB status:%d with new"
  10182. + " status:%d\n", urb_priv->later_data->status, status);
  10183. +
  10184. + urb_priv->later_data->status = status;
  10185. + return;
  10186. + }
  10187. +
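+ /* GFP_ATOMIC since this may be called from interrupt context or with
+ interrupts disabled (e.g. from tc_urb_dequeue) */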
  10188. + uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
  10189. + ASSERT(uld);
  10190. +
  10191. + uld->hcd = hcd;
  10192. + uld->urb = urb;
  10193. + uld->urb_num = urb_priv->urb_num;
  10194. + uld->status = status;
  10195. +
  10196. + //INIT_WORK(&uld->ws, tc_finish_urb_later_proc, uld);
  10197. + INIT_DELAYED_WORK(&uld->ws, tc_finish_urb_later_proc);
  10198. + urb_priv->later_data = uld;
  10199. +
  10200. + /* Schedule the finishing of the URB to happen later */
  10201. + schedule_delayed_work(&uld->ws, LATER_TIMER_DELAY);
  10202. +}
  10203. +
  10204. + /* hinko ignore usb_pipeisoc */
  10205. +#if 0
  10206. +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
  10207. + int status);
  10208. +#endif
  10209. +
  10210. +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  10211. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  10212. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10213. + int epid;
  10214. + char toggle;
  10215. + int urb_num;
  10216. + unsigned long flags;
  10217. +
  10218. + DBFENTER;
  10219. + ASSERT(urb_priv != NULL);
  10220. + epid = urb_priv->epid;
  10221. + urb_num = urb_priv->urb_num;
  10222. +
  10223. + if(urb != activeUrbList[epid]) {
  10224. + if(urb_list_entry(urb, epid)) {
+ /* Remove this URB from the list. Only happens when URBs are finished
+ before having been processed (dequeuing) */
  10227. + urb_list_del(urb, epid);
  10228. + } else {
  10229. + tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
  10230. + " epid:%d\n", (unsigned int)urb, urb_num, epid);
  10231. + }
  10232. + }
  10233. +
  10234. + /* Cancel any pending later-finish of this URB */
  10235. + if(urb_priv->later_data) {
  10236. + urb_priv->later_data->urb = NULL;
  10237. + }
  10238. +
  10239. + /* For an IN pipe, we always set the actual length, regardless of whether
  10240. + there was an error or not (which means the device driver can use the data
  10241. + if it wants to). */
  10242. + if(usb_pipein(urb->pipe)) {
  10243. + urb->actual_length = urb_priv->rx_offset;
  10244. + } else {
  10245. + /* Set actual_length for OUT urbs also; the USB mass storage driver seems
  10246. + to want that. */
  10247. + if (status == 0 && urb->status == -EINPROGRESS) {
  10248. + urb->actual_length = urb->transfer_buffer_length;
  10249. + } else {
  10250. + /* We wouldn't know of any partial writes if there was an error. */
  10251. + urb->actual_length = 0;
  10252. + }
  10253. + }
  10254. +
  10255. +
  10256. + /* URB status mangling */
  10257. + if(urb->status == -EINPROGRESS) {
  10258. + /* The USB core hasn't changed the status, let's set our finish status */
  10259. + urb->status = status;
  10260. +
  10261. + if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
  10262. + usb_pipein(urb->pipe) &&
  10263. + (urb->actual_length != urb->transfer_buffer_length)) {
  10264. + /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
+ max length) are to be treated as an error. */
  10266. + errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
  10267. + " data:%d\n", (unsigned int)urb, urb_num,
  10268. + urb->actual_length);
  10269. + urb->status = -EREMOTEIO;
  10270. + }
  10271. +
  10272. + if(urb_priv->urb_state == UNLINK) {
  10273. + /* URB has been requested to be unlinked asynchronously */
  10274. + urb->status = -ECONNRESET;
  10275. + errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
  10276. + (unsigned int)urb, urb_num, urb->status);
  10277. + }
  10278. + } else {
  10279. + /* The USB Core wants to signal some error via the URB, pass it through */
  10280. + }
  10281. +
  10282. + /* hinko ignore usb_pipeisoc */
  10283. +#if 0
  10284. + /* use completely different finish function for Isoc URBs */
  10285. + if(usb_pipeisoc(urb->pipe)) {
  10286. + tc_finish_isoc_urb(hcd, urb, status);
  10287. + return;
  10288. + }
  10289. +#endif
  10290. +
  10291. + /* Do special unlinking of EPs for Intr traffic */
  10292. + if(usb_pipeint(urb->pipe)) {
  10293. + tc_dma_unlink_intr_urb(urb);
  10294. + }
  10295. +
  10296. + /* hinko ignore usb_pipeisoc */
  10297. +#if 0
  10298. + /* Release allocated bandwidth for periodic transfers */
  10299. + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
  10300. + usb_release_bandwidth(urb->dev, urb, 0);
  10301. +#endif
  10302. +
  10303. + /* This URB is active on EP */
  10304. + if(urb == activeUrbList[epid]) {
  10305. + /* We need to fiddle with the toggle bits because the hardware doesn't do
  10306. + it for us. */
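+ /* The controller keeps the data toggle in R_USB_EPT_DATA; copy it back
+ into the USB core state so the next URB on this endpoint starts with
+ the correct DATA0/DATA1 toggle. */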
  10307. + toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
  10308. + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
  10309. + usb_pipeout(urb->pipe), toggle);
  10310. +
  10311. + /* Checks for Ctrl and Bulk EPs */
  10312. + switch(usb_pipetype(urb->pipe)) {
  10313. + case PIPE_BULK:
+ /* Check that the Bulk EP really is disabled before finishing active URB */
+ ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
+ IO_STATE(USB_EP_command, enable, no));
+ /* Disable sub-pointer for EP so the next tx_interrupt() won't
+ process this Bulk EP. */
  10319. + TxBulkEPList[epid].sub = 0;
  10320. + /* No need to wait for the DMA before changing the next pointer.
  10321. + The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
  10322. + the last one (INVALID_EPID) for actual traffic. */
  10323. + TxBulkEPList[epid].next =
  10324. + virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
  10325. + break;
  10326. + case PIPE_CONTROL:
+ /* Check that the Ctrl EP really is disabled before finishing active URB */
+ ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
+ IO_STATE(USB_EP_command, enable, no));
+ /* Disable sub-pointer for EP so the next tx_interrupt() won't
+ process this Ctrl EP. */
  10332. + TxCtrlEPList[epid].sub = 0;
  10333. + break;
  10334. + }
  10335. + }
  10336. +
  10337. + /* Free HC-private URB data*/
  10338. + urb_priv_free(hcd, urb);
  10339. +
  10340. + if(urb->status) {
  10341. + errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
  10342. + (unsigned int)urb, urb_num, str_dir(urb->pipe),
  10343. + str_type(urb->pipe), urb->actual_length, urb->status);
  10344. + } else {
  10345. + tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
  10346. + (unsigned int)urb, urb_num, str_dir(urb->pipe),
  10347. + str_type(urb->pipe), urb->actual_length, urb->status);
  10348. + }
  10349. +
  10350. + /* If we just finished an active URB, clear active pointer. */
  10351. + if (urb == activeUrbList[epid]) {
  10352. + /* Make URB not active on EP anymore */
  10353. + activeUrbList[epid] = NULL;
  10354. +
  10355. + if(urb->status == 0) {
+ /* URB finished successfully, process queue to see if there are any more
  10357. + URBs waiting before we call completion function.*/
  10358. + if(crisv10_hcd->running) {
  10359. + /* Only process queue if USB controller is running */
  10360. + tc_dma_process_queue(epid);
  10361. + } else {
  10362. + tc_warn("No processing of queue for epid:%d, USB Controller not"
  10363. + " running\n", epid);
  10364. + }
  10365. + }
  10366. + }
  10367. +
  10368. + /* Hand the URB from HCD to its USB device driver, using its completion
  10369. + functions */
  10370. +// usb_hcd_giveback_urb (hcd, urb);
  10371. + /**
  10372. + * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
  10373. + * @hcd: host controller to which @urb was submitted
  10374. + * @urb: URB being unlinked
  10375. + *
  10376. + * Host controller drivers should call this routine before calling
  10377. + * usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
  10378. + * interrupts must be disabled. The actions carried out here are required
  10379. + * for URB completion.
  10380. + */
  10381. +
  10382. + /*hinko link/unlink urb -> ep */
  10383. + //spin_lock(&crisv10_hcd->lock);
  10384. + spin_lock_irqsave(&crisv10_hcd->lock, flags);
  10385. + usb_hcd_unlink_urb_from_ep(hcd, urb);
  10386. + usb_hcd_giveback_urb(hcd, urb, status);
  10387. + //spin_unlock(&crisv10_hcd->lock);
  10388. + spin_unlock_irqrestore(&crisv10_hcd->lock, flags);
  10389. +
+ /* Check the queue once more if the URB returned with an error, because we
+ didn't do it before the completion function: the specification states
+ that the queue should not restart until all its unlinked URBs have been
+ fully retired, with the completion functions run */
  10394. + if(crisv10_hcd->running) {
  10395. + /* Only process queue if USB controller is running */
  10396. + tc_dma_process_queue(epid);
  10397. + } else {
  10398. + tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
  10399. + epid);
  10400. + }
  10401. +
  10402. + DBFEXIT;
  10403. +}
  10404. +
  10405. + /* hinko ignore usb_pipeisoc */
  10406. +#if 0
  10407. +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
  10408. + int status) {
  10409. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10410. + int epid, i;
  10411. + volatile int timeout = 10000;
  10412. +
  10413. + ASSERT(urb_priv);
  10414. + epid = urb_priv->epid;
  10415. +
  10416. + ASSERT(usb_pipeisoc(urb->pipe));
  10417. +
  10418. + /* Set that all isoc packets have status and length set before
  10419. + completing the urb. */
  10420. + for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
  10421. + urb->iso_frame_desc[i].actual_length = 0;
  10422. + urb->iso_frame_desc[i].status = -EPROTO;
  10423. + }
  10424. +
  10425. + /* Check if the URB is currently active (done or error) */
  10426. + if(urb == activeUrbList[epid]) {
  10427. + /* Check if there are another In Isoc URB queued for this epid */
  10428. + if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
  10429. + /* Move it from queue to active and mark it started so Isoc transfers
  10430. + won't be interrupted.
  10431. + All Isoc URBs data transfers are already added to DMA lists so we
  10432. + don't have to insert anything in DMA lists here. */
  10433. + activeUrbList[epid] = urb_list_first(epid);
  10434. + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
  10435. + STARTED;
  10436. + urb_list_del(activeUrbList[epid], epid);
  10437. +
  10438. + if(urb->status) {
  10439. + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
  10440. + " status:%d, new waiting URB:0x%x[%d]\n",
  10441. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  10442. + str_type(urb->pipe), urb_priv->isoc_packet_counter,
  10443. + urb->number_of_packets, urb->status,
  10444. + (unsigned int)activeUrbList[epid],
  10445. + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
  10446. + }
  10447. +
  10448. + } else { /* No other URB queued for this epid */
  10449. + if(urb->status) {
  10450. + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
  10451. + " status:%d, no new URB waiting\n",
  10452. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  10453. + str_type(urb->pipe), urb_priv->isoc_packet_counter,
  10454. + urb->number_of_packets, urb->status);
  10455. + }
  10456. +
  10457. + /* Check if EP is still enabled, then shut it down. */
  10458. + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  10459. + isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
  10460. +
  10461. + /* Should only occur for In Isoc EPs where SB isn't consumed. */
  10462. + ASSERT(usb_pipein(urb->pipe));
  10463. +
  10464. + /* Disable it and wait for it to stop */
  10465. + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  10466. +
  10467. + /* Ah, the luxury of busy-wait. */
  10468. + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
  10469. + (timeout-- > 0));
  10470. + if(timeout == 0) {
  10471. + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
  10472. + }
  10473. + }
  10474. +
  10475. + /* Unlink SB to say that epid is finished. */
  10476. + TxIsocEPList[epid].sub = 0;
  10477. + TxIsocEPList[epid].hw_len = 0;
  10478. +
  10479. + /* No URB active for EP anymore */
  10480. + activeUrbList[epid] = NULL;
  10481. + }
+ } else { /* Finishing of not active URB (queued up with SBs though) */
  10483. + isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
  10484. + " SB queued but not active\n",
  10485. + (unsigned int)urb, str_dir(urb->pipe),
  10486. + urb_priv->isoc_packet_counter, urb->number_of_packets,
  10487. + urb->status);
  10488. + if(usb_pipeout(urb->pipe)) {
  10489. + /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
  10490. + struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
  10491. +
  10492. + iter_sb = TxIsocEPList[epid].sub ?
  10493. + phys_to_virt(TxIsocEPList[epid].sub) : 0;
  10494. + prev_sb = 0;
  10495. +
  10496. + /* SB that is linked before this URBs first SB */
  10497. + while (iter_sb && (iter_sb != urb_priv->first_sb)) {
  10498. + prev_sb = iter_sb;
  10499. + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
  10500. + }
  10501. +
  10502. + if (iter_sb == 0) {
  10503. + /* Unlink of the URB currently being transmitted. */
  10504. + prev_sb = 0;
  10505. + iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
  10506. + }
  10507. +
  10508. + while (iter_sb && (iter_sb != urb_priv->last_sb)) {
  10509. + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
  10510. + }
  10511. +
  10512. + if (iter_sb) {
  10513. + next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
  10514. + } else {
  10515. + /* This should only happen if the DMA has completed
  10516. + processing the SB list for this EP while interrupts
  10517. + are disabled. */
  10518. + isoc_dbg("Isoc urb not found, already sent?\n");
  10519. + next_sb = 0;
  10520. + }
  10521. + if (prev_sb) {
  10522. + prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
  10523. + } else {
  10524. + TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
  10525. + }
  10526. + }
  10527. + }
  10528. +
  10529. + /* Free HC-private URB data*/
  10530. + urb_priv_free(hcd, urb);
  10531. +
  10532. + usb_release_bandwidth(urb->dev, urb, 0);
  10533. +
  10534. + /* Hand the URB from HCD to its USB device driver, using its completion
  10535. + functions */
  10536. + usb_hcd_giveback_urb (hcd, urb);
  10537. +}
  10538. +#endif
  10539. +
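+/* Monotonically increasing sequence number assigned to every URB; the
+ later-finish work compares it to detect that an URB has been completed
+ and resubmitted (reused) in the meantime. */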
  10540. +static __u32 urb_num = 0;
  10541. +
  10542. +/* allocate and initialize URB private data */
  10543. +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
  10544. + int mem_flags) {
  10545. + struct crisv10_urb_priv *urb_priv;
  10546. +
  10547. + urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
  10548. + if (!urb_priv)
  10549. + return -ENOMEM;
  10550. + memset(urb_priv, 0, sizeof *urb_priv);
  10551. +
  10552. + urb_priv->epid = epid;
  10553. + urb_priv->urb_state = NOT_STARTED;
  10554. +
  10555. + urb->hcpriv = urb_priv;
  10556. + /* Assign URB a sequence number, and increment counter */
  10557. + urb_priv->urb_num = urb_num;
  10558. + urb_num++;
  10559. + return 0;
  10560. +}
  10561. +
  10562. +/* free URB private data */
  10563. +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
  10564. + int i;
  10565. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10566. + ASSERT(urb_priv != 0);
  10567. +
+ /* Check if it has any SBs linked that need to be freed */
  10569. + if(urb_priv->first_sb != NULL) {
  10570. + struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
  10571. + int i = 0;
  10572. + first_sb = urb_priv->first_sb;
  10573. + last_sb = urb_priv->last_sb;
  10574. + ASSERT(last_sb);
  10575. + while(first_sb != last_sb) {
  10576. + next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
  10577. + kmem_cache_free(usb_desc_cache, first_sb);
  10578. + first_sb = next_sb;
  10579. + i++;
  10580. + }
  10581. + kmem_cache_free(usb_desc_cache, last_sb);
  10582. + i++;
  10583. + }
  10584. +
+ /* Check if it has any EPs in its Intr pool that also need to be freed */
  10586. + if(urb_priv->intr_ep_pool_length > 0) {
  10587. + for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
  10588. + kfree(urb_priv->intr_ep_pool[i]);
  10589. + }
  10590. + /*
  10591. + tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
  10592. + urb_priv->intr_ep_pool_length, (unsigned int)urb);
  10593. + */
  10594. + }
  10595. +
  10596. + kfree(urb_priv);
  10597. + urb->hcpriv = NULL;
  10598. +}
  10599. +
  10600. +static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
  10601. + struct crisv10_ep_priv *ep_priv;
  10602. +
  10603. + ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
  10604. + if (!ep_priv)
  10605. + return -ENOMEM;
  10606. + memset(ep_priv, 0, sizeof *ep_priv);
  10607. +
  10608. + ep->hcpriv = ep_priv;
  10609. + return 0;
  10610. +}
  10611. +
  10612. +static void ep_priv_free(struct usb_host_endpoint *ep) {
  10613. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  10614. + ASSERT(ep_priv);
  10615. + kfree(ep_priv);
  10616. + ep->hcpriv = NULL;
  10617. +}
  10618. +
  10619. +/* EPID handling functions, managing EP-list in Etrax through wrappers */
  10620. +/* ------------------------------------------------------------------- */
  10621. +
  10622. +/* Sets up a new EPID for an endpoint or returns existing if found */
  10623. +//static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
  10624. +// int mem_flags) {
  10625. +static int tc_setup_epid(struct urb *urb, int mem_flags)
  10626. +{
  10627. + int epid;
  10628. + char devnum, endpoint, out_traffic, slow;
  10629. + int maxlen;
  10630. + __u32 epid_data;
  10631. + struct usb_host_endpoint *ep = urb->ep;
  10632. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  10633. +
  10634. + DBFENTER;
  10635. +
  10636. + /* Check if a valid epid already is setup for this endpoint */
  10637. + if(ep_priv != NULL) {
  10638. + return ep_priv->epid;
  10639. + }
  10640. +
  10641. + /* We must find and initiate a new epid for this urb. */
  10642. + epid = tc_allocate_epid();
  10643. +
  10644. + if (epid == -1) {
  10645. + /* Failed to allocate a new epid. */
  10646. + DBFEXIT;
  10647. + return epid;
  10648. + }
  10649. +
  10650. + /* We now have a new epid to use. Claim it. */
  10651. + epid_state[epid].inuse = 1;
  10652. +
  10653. + /* Init private data for new endpoint */
+ if(ep_priv_create(ep, mem_flags) != 0) {
+ /* Release the epid claimed above so it isn't leaked on failure */
+ epid_state[epid].inuse = 0;
+ return -ENOMEM;
+ }
  10657. + ep_priv = ep->hcpriv;
  10658. + ep_priv->epid = epid;
  10659. +
  10660. + devnum = usb_pipedevice(urb->pipe);
  10661. + endpoint = usb_pipeendpoint(urb->pipe);
  10662. + slow = (urb->dev->speed == USB_SPEED_LOW);
  10663. + maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  10664. +
  10665. + if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
  10666. + /* We want both IN and OUT control traffic to be put on the same
  10667. + EP/SB list. */
  10668. + out_traffic = 1;
  10669. + } else {
  10670. + out_traffic = usb_pipeout(urb->pipe);
  10671. + }
  10672. +
  10673. + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  10674. + epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
  10675. + /* FIXME: Change any to the actual port? */
  10676. + IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
  10677. + IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
  10678. + IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
  10679. + IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
  10680. + etrax_epid_iso_set(epid, epid_data);
  10681. + } else {
  10682. + epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
  10683. + IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
  10684. + /* FIXME: Change any to the actual port? */
  10685. + IO_STATE(R_USB_EPT_DATA, port, any) |
  10686. + IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
  10687. + IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
  10688. + IO_FIELD(R_USB_EPT_DATA, dev, devnum);
  10689. + etrax_epid_set(epid, epid_data);
  10690. + }
  10691. +
  10692. + epid_state[epid].out_traffic = out_traffic;
  10693. + epid_state[epid].type = usb_pipetype(urb->pipe);
  10694. +
  10695. + tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
  10696. + (unsigned int)ep, epid, devnum, endpoint, maxlen,
  10697. + str_type(urb->pipe), out_traffic ? "out" : "in",
  10698. + slow ? "low" : "full");
  10699. +
  10700. + /* Enable Isoc eof interrupt if we set up the first Isoc epid */
  10701. + if(usb_pipeisoc(urb->pipe)) {
  10702. + isoc_epid_counter++;
  10703. + if(isoc_epid_counter == 1) {
  10704. + isoc_warn("Enabled Isoc eof interrupt\n");
  10705. + *R_USB_IRQ_MASK_SET |= IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
  10706. + }
  10707. + }
  10708. +
  10709. + DBFEXIT;
  10710. + return epid;
  10711. +}
  10712. +
  10713. +static void tc_free_epid(struct usb_host_endpoint *ep) {
  10714. + unsigned long flags;
  10715. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  10716. + int epid;
  10717. + volatile int timeout = 10000;
  10718. +
  10719. + DBFENTER;
  10720. +
  10721. + if (ep_priv == NULL) {
  10722. + tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
  10723. + DBFEXIT;
  10724. + return;
  10725. + }
  10726. +
  10727. + epid = ep_priv->epid;
  10728. +
  10729. + /* Disable Isoc eof interrupt if we free the last Isoc epid */
  10730. + if(epid_isoc(epid)) {
  10731. + ASSERT(isoc_epid_counter > 0);
  10732. + isoc_epid_counter--;
  10733. + if(isoc_epid_counter == 0) {
  10734. + *R_USB_IRQ_MASK_SET &= ~IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
  10735. + isoc_warn("Disabled Isoc eof interrupt\n");
  10736. + }
  10737. + }
  10738. +
+ /* Take the lock manually instead of in the epid_x_x wrappers,
+ because we need to be polling here */
  10741. + spin_lock_irqsave(&etrax_epid_lock, flags);
  10742. +
  10743. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  10744. + nop();
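+ /* Wait for the controller to drop the hold bit (presumably indicating it
+ is still operating on this endpoint entry) before invalidating it */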
  10745. + while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
  10746. + (timeout-- > 0));
+ if(timeout < 0) {
  10748. + warn("Timeout while waiting for epid:%d to drop hold\n", epid);
  10749. + }
  10750. + /* This will, among other things, set the valid field to 0. */
  10751. + *R_USB_EPT_DATA = 0;
  10752. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  10753. +
  10754. + /* Free resource in software state info list */
  10755. + epid_state[epid].inuse = 0;
  10756. +
  10757. + /* Free private endpoint data */
  10758. + ep_priv_free(ep);
  10759. +
  10760. + DBFEXIT;
  10761. +}
  10762. +
  10763. +static int tc_allocate_epid(void) {
  10764. + int i;
  10765. + DBFENTER;
  10766. + for (i = 0; i < NBR_OF_EPIDS; i++) {
  10767. + if (!epid_inuse(i)) {
  10768. + DBFEXIT;
  10769. + return i;
  10770. + }
  10771. + }
  10772. +
  10773. + tc_warn("Found no free epids\n");
  10774. + DBFEXIT;
  10775. + return -1;
  10776. +}
  10777. +
  10778. +
  10779. +/* Wrappers around the list functions (include/linux/list.h). */
  10780. +/* ---------------------------------------------------------- */
  10781. +static inline int __urb_list_empty(int epid) {
  10782. + int retval;
  10783. + retval = list_empty(&urb_list[epid]);
  10784. + return retval;
  10785. +}
  10786. +
  10787. +/* Returns first urb for this epid, or NULL if list is empty. */
  10788. +static inline struct urb *urb_list_first(int epid) {
  10789. + unsigned long flags;
  10790. + struct urb *first_urb = 0;
  10791. + spin_lock_irqsave(&urb_list_lock, flags);
  10792. + if (!__urb_list_empty(epid)) {
  10793. + /* Get the first urb (i.e. head->next). */
  10794. + urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
  10795. + first_urb = urb_entry->urb;
  10796. + }
  10797. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10798. + return first_urb;
  10799. +}
  10800. +
  10801. +/* Adds an urb_entry at the end of the list for this epid. */
  10802. +static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
  10803. + unsigned long flags;
  10804. + urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
  10805. + ASSERT(urb_entry);
  10806. +
  10807. + urb_entry->urb = urb;
  10808. + spin_lock_irqsave(&urb_list_lock, flags);
  10809. + list_add_tail(&urb_entry->list, &urb_list[epid]);
  10810. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10811. +}
  10812. +
  10813. +/* Search through the list for an element that contains this urb. (The list
  10814. + is expected to be short and the one we are about to delete will often be
  10815. + the first in the list.)
  10816. + The caller must hold urb_list_lock. */
  10817. +static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
  10818. + struct list_head *entry;
  10819. + struct list_head *tmp;
  10820. + urb_entry_t *urb_entry;
  10821. +
  10822. + list_for_each_safe(entry, tmp, &urb_list[epid]) {
  10823. + urb_entry = list_entry(entry, urb_entry_t, list);
  10824. + ASSERT(urb_entry);
  10825. + ASSERT(urb_entry->urb);
  10826. +
  10827. + if (urb_entry->urb == urb) {
  10828. + return urb_entry;
  10829. + }
  10830. + }
  10831. + return NULL;
  10832. +}
  10833. +
  10834. +/* Same function as above but for global use. Protects list by spinlock */
  10835. +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
  10836. + unsigned long flags;
  10837. + urb_entry_t *urb_entry;
  10838. + spin_lock_irqsave(&urb_list_lock, flags);
  10839. + urb_entry = __urb_list_entry(urb, epid);
  10840. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10841. + return (urb_entry);
  10842. +}
  10843. +
  10844. +/* Delete an urb from the list. */
  10845. +static inline void urb_list_del(struct urb *urb, int epid) {
  10846. + unsigned long flags;
  10847. + urb_entry_t *urb_entry;
  10848. +
  10849. + /* Delete entry and free. */
  10850. + spin_lock_irqsave(&urb_list_lock, flags);
  10851. + urb_entry = __urb_list_entry(urb, epid);
  10852. + ASSERT(urb_entry);
  10853. +
  10854. + list_del(&urb_entry->list);
  10855. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10856. + kfree(urb_entry);
  10857. +}
  10858. +
  10859. +/* Move an urb to the end of the list. */
  10860. +static inline void urb_list_move_last(struct urb *urb, int epid) {
  10861. + unsigned long flags;
  10862. + urb_entry_t *urb_entry;
  10863. +
  10864. + spin_lock_irqsave(&urb_list_lock, flags);
  10865. + urb_entry = __urb_list_entry(urb, epid);
  10866. + ASSERT(urb_entry);
  10867. +
  10868. + list_del(&urb_entry->list);
  10869. + list_add_tail(&urb_entry->list, &urb_list[epid]);
  10870. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10871. +}
  10872. +
  10873. +/* Get the next urb in the list. */
  10874. +static inline struct urb *urb_list_next(struct urb *urb, int epid) {
  10875. + unsigned long flags;
  10876. + urb_entry_t *urb_entry;
  10877. +
  10878. + spin_lock_irqsave(&urb_list_lock, flags);
  10879. + urb_entry = __urb_list_entry(urb, epid);
  10880. + ASSERT(urb_entry);
  10881. +
  10882. + if (urb_entry->list.next != &urb_list[epid]) {
  10883. + struct list_head *elem = urb_entry->list.next;
  10884. + urb_entry = list_entry(elem, urb_entry_t, list);
  10885. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10886. + return urb_entry->urb;
  10887. + } else {
  10888. + spin_unlock_irqrestore(&urb_list_lock, flags);
  10889. + return NULL;
  10890. + }
  10891. +}
  10892. +
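+/* Allocate an EP descriptor from the descriptor cache and initialize it:
+ enable it for the given epid and point its sub field at sb_desc (or 0 if
+ no SB list is given). Returns NULL if allocation fails. */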
  10893. +struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
  10894. + int mem_flags) {
  10895. + struct USB_EP_Desc *ep_desc;
  10896. + ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
  10897. + if(ep_desc == NULL)
  10898. + return NULL;
  10899. + memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
  10900. +
  10901. + ep_desc->hw_len = 0;
  10902. + ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
  10903. + IO_STATE(USB_EP_command, enable, yes));
  10904. + if(sb_desc == NULL) {
  10905. + ep_desc->sub = 0;
  10906. + } else {
  10907. + ep_desc->sub = virt_to_phys(sb_desc);
  10908. + }
  10909. + return ep_desc;
  10910. +}
  10911. +
  10912. +#define TT_ZOUT 0
  10913. +#define TT_IN 1
  10914. +#define TT_OUT 2
  10915. +#define TT_SETUP 3
  10916. +
  10917. +#define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
  10918. +#define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
  10919. +#define CMD_FULL IO_STATE(USB_SB_command, full, yes)
  10920. +
  10921. +/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
  10922. + SBs. Also used by create_sb_in() to avoid duplicating the allocation
  10923. + procedure. */
  10924. +struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
  10925. + int datalen, int mem_flags) {
  10926. + struct USB_SB_Desc *sb_desc;
  10927. + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
  10928. + if(sb_desc == NULL)
  10929. + return NULL;
  10930. + memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
  10931. +
  10932. + sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
  10933. + IO_STATE(USB_SB_command, eot, yes);
  10934. +
  10935. + sb_desc->sw_len = datalen;
  10936. + if(data != NULL) {
  10937. + sb_desc->buf = virt_to_phys(data);
  10938. + } else {
  10939. + sb_desc->buf = 0;
  10940. + }
  10941. + if(sb_prev != NULL) {
  10942. + sb_prev->next = virt_to_phys(sb_desc);
  10943. + }
  10944. + return sb_desc;
  10945. +}
  10946. +
  10947. +/* Creates a copy of an existing SB by allocating space for it and copying
  10948. + its settings */
  10949. +struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
  10950. + struct USB_SB_Desc *sb_desc;
  10951. + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
  10952. + if(sb_desc == NULL)
  10953. + return NULL;
  10954. +
  10955. + memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
  10956. + return sb_desc;
  10957. +}
  10958. +
  10959. +/* A specific create_sb function for creating In SBs. This is needed because
  10960. + datalen for In SBs specifies how many packets we are expecting. It also
  10961. + sets up the rem field to show how many bytes we expect in the last packet
  10962. + if it is not a full one */
  10963. +struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
  10964. + int maxlen, int mem_flags) {
  10965. + struct USB_SB_Desc *sb_desc;
  10966. + sb_desc = create_sb(sb_prev, TT_IN, NULL,
  10967. + datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
  10968. + if(sb_desc == NULL)
  10969. + return NULL;
  10970. + sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
  10971. + return sb_desc;
  10972. +}
  10973. +
  10974. +void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
  10975. + sb_desc->command |= flags;
  10976. +}
  10977. +
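+/* Build the SB descriptor chain for an URB according to its pipe type
+ (Control, Bulk, Interrupt or Isochronous) and attach the chain to the URB's
+ private data through first_sb/last_sb. Returns 0 on success or a negative
+ error code. */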
  10978. +int create_sb_for_urb(struct urb *urb, int mem_flags) {
  10979. + int is_out = !usb_pipein(urb->pipe);
  10980. + int type = usb_pipetype(urb->pipe);
  10981. + int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
  10982. + int buf_len = urb->transfer_buffer_length;
  10983. + void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
  10984. + struct USB_SB_Desc *sb_desc = NULL;
  10985. +
  10986. + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  10987. + ASSERT(urb_priv != NULL);
  10988. +
  10989. + switch(type) {
  10990. + case PIPE_CONTROL:
  10991. + /* Setup stage */
  10992. + sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
  10993. + if(sb_desc == NULL)
  10994. + return -ENOMEM;
  10995. + set_sb_cmds(sb_desc, CMD_FULL);
  10996. +
  10997. + /* Attach first SB to URB */
  10998. + urb_priv->first_sb = sb_desc;
  10999. +
  11000. + if (is_out) { /* Out Control URB */
  11001. + /* If this Control OUT transfer has an optional data stage we add
  11002. + an OUT token before the mandatory IN (status) token */
  11003. + if ((buf_len > 0) && buf) {
  11004. + sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
  11005. + if(sb_desc == NULL)
  11006. + return -ENOMEM;
  11007. + set_sb_cmds(sb_desc, CMD_FULL);
  11008. + }
  11009. +
  11010. + /* Status stage */
  11011. + /* The data length has to be exactly 1. This is due to a requirement
  11012. + of the USB specification that a host must be prepared to receive
  11013. + data in the status phase */
  11014. + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
  11015. + if(sb_desc == NULL)
  11016. + return -ENOMEM;
  11017. + } else { /* In control URB */
  11018. + /* Data stage */
  11019. + sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
  11020. + if(sb_desc == NULL)
  11021. + return -ENOMEM;
  11022. +
  11023. + /* Status stage */
  11024. + /* Read comment at zout_buffer declaration for an explanation of this. */
  11025. + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
  11026. + if(sb_desc == NULL)
  11027. + return -ENOMEM;
  11028. + /* Set descriptor interrupt flag for in URBs so we can finish URB after
  11029. + zout-packet has been sent */
  11030. + set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
  11031. + }
  11032. + /* Set end-of-list flag in last SB */
  11033. + set_sb_cmds(sb_desc, CMD_EOL);
  11034. + /* Attach last SB to URB */
  11035. + urb_priv->last_sb = sb_desc;
  11036. + break;
  11037. +
  11038. + case PIPE_BULK:
  11039. + if (is_out) { /* Out Bulk URB */
  11040. + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
  11041. + if(sb_desc == NULL)
  11042. + return -ENOMEM;
  11043. + /* The full field is set to yes, even if we don't actually check that
  11044. + this is a full-length transfer (i.e., that transfer_buffer_length %
  11045. + maxlen = 0).
  11046. + Setting full prevents the USB controller from sending an empty packet
  11047. + in that case. However, if URB_ZERO_PACKET was set we want that. */
  11048. + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
  11049. + set_sb_cmds(sb_desc, CMD_FULL);
  11050. + }
  11051. + } else { /* In Bulk URB */
  11052. + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
  11053. + if(sb_desc == NULL)
  11054. + return -ENOMEM;
  11055. + }
  11056. + /* Set end-of-list flag for last SB */
  11057. + set_sb_cmds(sb_desc, CMD_EOL);
  11058. +
  11059. + /* Attach SB to URB */
  11060. + urb_priv->first_sb = sb_desc;
  11061. + urb_priv->last_sb = sb_desc;
  11062. + break;
  11063. +
  11064. + case PIPE_INTERRUPT:
  11065. + if(is_out) { /* Out Intr URB */
  11066. + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
  11067. + if(sb_desc == NULL)
  11068. + return -ENOMEM;
  11069. +
  11070. + /* The full field is set to yes, even if we don't actually check that
  11071. + this is a full-length transfer (i.e., that transfer_buffer_length %
  11072. + maxlen = 0).
  11073. + Setting full prevents the USB controller from sending an empty packet
  11074. + in that case. However, if URB_ZERO_PACKET was set we want that. */
  11075. + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
  11076. + set_sb_cmds(sb_desc, CMD_FULL);
  11077. + }
  11078. + /* Only generate TX interrupt if it's an Out URB */
  11079. + set_sb_cmds(sb_desc, CMD_INTR);
  11080. +
  11081. + } else { /* In Intr URB */
  11082. + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
  11083. + if(sb_desc == NULL)
  11084. + return -ENOMEM;
  11085. + }
  11086. + /* Set end-of-list flag for last SB */
  11087. + set_sb_cmds(sb_desc, CMD_EOL);
  11088. +
  11089. + /* Attach SB to URB */
  11090. + urb_priv->first_sb = sb_desc;
  11091. + urb_priv->last_sb = sb_desc;
  11092. +
  11093. + break;
  11094. + case PIPE_ISOCHRONOUS:
  11095. + if(is_out) { /* Out Isoc URB */
  11096. + int i;
  11097. + if(urb->number_of_packets == 0) {
  11098. + tc_err("Can't create SBs for Isoc URB with zero packets\n");
  11099. + return -EPIPE;
  11100. + }
  11101. + /* Create one SB descriptor for each packet and link them together. */
  11102. + for(i = 0; i < urb->number_of_packets; i++) {
  11103. + if (urb->iso_frame_desc[i].length > 0) {
  11104. +
  11105. + sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
  11106. + urb->iso_frame_desc[i].offset,
  11107. + urb->iso_frame_desc[i].length, mem_flags);
  11108. + if(sb_desc == NULL)
  11109. + return -ENOMEM;
  11110. +
  11111. + /* Check if it's a full length packet */
  11112. + if (urb->iso_frame_desc[i].length ==
  11113. + usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
  11114. + set_sb_cmds(sb_desc, CMD_FULL);
  11115. + }
  11116. +
  11117. + } else { /* zero length packet */
  11118. + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
  11119. + if(sb_desc == NULL)
  11120. + return -ENOMEM;
  11121. + set_sb_cmds(sb_desc, CMD_FULL);
  11122. + }
  11123. + /* Attach first SB descriptor to URB */
  11124. + if (i == 0) {
  11125. + urb_priv->first_sb = sb_desc;
  11126. + }
  11127. + }
  11128. + /* Set interrupt and end-of-list flags in last SB */
  11129. + set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
  11130. + /* Attach last SB descriptor to URB */
  11131. + urb_priv->last_sb = sb_desc;
  11132. + tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
  11133. + urb->number_of_packets, (unsigned int)urb);
  11134. + } else { /* In Isoc URB */
  11135. + /* Actual number of packets is not relevant for periodic in traffic as
  11136. + long as it is more than zero. Set to 1 always. */
  11137. + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
  11138. + if(sb_desc == NULL)
  11139. + return -ENOMEM;
  11140. + /* Set end-of-list flags for SB */
  11141. + set_sb_cmds(sb_desc, CMD_EOL);
  11142. +
  11143. + /* Attach SB to URB */
  11144. + urb_priv->first_sb = sb_desc;
  11145. + urb_priv->last_sb = sb_desc;
  11146. + }
  11147. + break;
  11148. + default:
  11149. + tc_err("Unknown pipe-type\n");
  11150. + return -EPIPE;
  11151. + break;
  11152. + }
  11153. + return 0;
  11154. +}
  11155. +
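+/* Allocate the pool of EP descriptors needed for an Interrupt URB. The
+ interval is rounded down to a power of two (and forced to the maximum for
+ Out Intr) and enough EPs are created to cover MAX_INTR_INTERVAL at that
+ interval; all of them point at the URB's already created SB list. */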
  11156. +int init_intr_urb(struct urb *urb, int mem_flags) {
  11157. + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  11158. + struct USB_EP_Desc* ep_desc;
  11159. + int interval;
  11160. + int i;
  11161. + int ep_count;
  11162. +
  11163. + ASSERT(urb_priv != NULL);
  11164. + ASSERT(usb_pipeint(urb->pipe));
  11165. + /* We can't support an interval longer than the number of eof descriptors
  11166. + in TxIntrEPList */
  11167. + if(urb->interval > MAX_INTR_INTERVAL) {
  11168. + tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
  11169. + MAX_INTR_INTERVAL);
  11170. + return -EINVAL;
  11171. + }
  11172. +
  11173. + /* We assume that the SB descriptors already have been setup */
  11174. + ASSERT(urb_priv->first_sb != NULL);
  11175. +
  11176. + /* Round the interval down to 2^n; it is obvious that this code favours
  11177. + smaller numbers, but that is actually a good thing */
  11178. + /* FIXME: The "rounding error" for larger intervals will be quite
  11179. + large. For in traffic this shouldn't be a problem since it will only
  11180. + mean that we "poll" more often. */
  11181. + interval = urb->interval;
  11182. + for (i = 0; interval; i++) {
  11183. + interval = interval >> 1;
  11184. + }
  11185. + urb_priv->interval = 1 << (i - 1);
  11186. +
  11187. + /* We can only use the max interval for Out Interrupt, because we can only
  11188. + handle one linked-in EP for a certain epid in the Intr descriptor array at
  11189. + a time. The USB Controller in the Etrax 100LX continues to process Intr EPs
  11190. + so we would have no way of knowing which one caused the actual transfer if
  11191. + we had several linked in. */
  11192. + if(usb_pipeout(urb->pipe)) {
  11193. + urb_priv->interval = MAX_INTR_INTERVAL;
  11194. + }
  11195. +
  11196. + /* Calculate amount of EPs needed */
  11197. + ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
  11198. +
  11199. + for(i = 0; i < ep_count; i++) {
  11200. + ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
  11201. + if(ep_desc == NULL) {
  11202. + /* Free any descriptors that we may have allocated before failure */
  11203. + while(i > 0) {
  11204. + i--;
  11205. + kfree(urb_priv->intr_ep_pool[i]);
  11206. + }
  11207. + return -ENOMEM;
  11208. + }
  11209. + urb_priv->intr_ep_pool[i] = ep_desc;
  11210. + }
  11211. + urb_priv->intr_ep_pool_length = ep_count;
  11212. + return 0;
  11213. +}
  11214. +
  11215. +/* DMA RX/TX functions */
  11216. +/* ----------------------- */
  11217. +
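+/* Set up the circular list of RX descriptors, point DMA channel 9 at the
+ first descriptor and start the channel. */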
  11218. +static void tc_dma_init_rx_list(void) {
  11219. + int i;
  11220. +
  11221. + /* Setup descriptor list except last one */
  11222. + for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
  11223. + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
  11224. + RxDescList[i].command = 0;
  11225. + RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
  11226. + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
  11227. + RxDescList[i].hw_len = 0;
  11228. + RxDescList[i].status = 0;
  11229. +
  11230. + /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
  11231. + USB_IN_Desc for the relevant fields.) */
  11232. + prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
  11233. +
  11234. + }
  11235. + /* Special handling of last descriptor */
  11236. + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
  11237. + RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
  11238. + RxDescList[i].next = virt_to_phys(&RxDescList[0]);
  11239. + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
  11240. + RxDescList[i].hw_len = 0;
  11241. + RxDescList[i].status = 0;
  11242. +
  11243. + /* Setup list pointers that show progress in list */
  11244. + myNextRxDesc = &RxDescList[0];
  11245. + myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
  11246. +
  11247. + flush_etrax_cache();
  11248. + /* Point DMA to first descriptor in list and start it */
  11249. + *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
  11250. + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
  11251. +}
  11252. +
  11253. +
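+/* Set up the circular list of Bulk EP descriptors, one per epid, each with
+ a pair of disabled dummy EPs used to get an interrupt when the DMA channel
+ is about to become disabled. Point DMA channel 8 sub-channel 0 at the last
+ EP in the list. */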
  11254. +static void tc_dma_init_tx_bulk_list(void) {
  11255. + int i;
  11256. + volatile struct USB_EP_Desc *epDescr;
  11257. +
  11258. + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
  11259. + epDescr = &(TxBulkEPList[i]);
  11260. + CHECK_ALIGN(epDescr);
  11261. + epDescr->hw_len = 0;
  11262. + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
  11263. + epDescr->sub = 0;
  11264. + epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
  11265. +
  11266. + /* Initiate two EPs, disabled and with the eol flag set. No need for any
  11267. + preserved epid. */
  11268. +
  11269. + /* The first one has the intr flag set so we get an interrupt when the DMA
  11270. + channel is about to become disabled. */
  11271. + CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
  11272. + TxBulkDummyEPList[i][0].hw_len = 0;
  11273. + TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
  11274. + IO_STATE(USB_EP_command, eol, yes) |
  11275. + IO_STATE(USB_EP_command, intr, yes));
  11276. + TxBulkDummyEPList[i][0].sub = 0;
  11277. + TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
  11278. +
  11279. + /* The second one. */
  11280. + CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
  11281. + TxBulkDummyEPList[i][1].hw_len = 0;
  11282. + TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
  11283. + IO_STATE(USB_EP_command, eol, yes));
  11284. + TxBulkDummyEPList[i][1].sub = 0;
  11285. + /* The last dummy's next pointer is the same as the current EP's next pointer. */
  11286. + TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
  11287. + }
  11288. +
  11289. + /* Special handling of last descr in list, make list circular */
  11290. + epDescr = &TxBulkEPList[i];
  11291. + CHECK_ALIGN(epDescr);
  11292. + epDescr->hw_len = 0;
  11293. + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
  11294. + IO_FIELD(USB_EP_command, epid, i);
  11295. + epDescr->sub = 0;
  11296. + epDescr->next = virt_to_phys(&TxBulkEPList[0]);
  11297. +
  11298. + /* Init DMA sub-channel pointers to last item in each list */
  11299. + *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
  11300. + /* No point in starting the bulk channel yet.
  11301. + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
  11302. +}
  11303. +
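+/* Set up the circular list of Ctrl EP descriptors, one per epid, and point
+ DMA channel 8 sub-channel 1 at the last EP in the list. */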
  11304. +static void tc_dma_init_tx_ctrl_list(void) {
  11305. + int i;
  11306. + volatile struct USB_EP_Desc *epDescr;
  11307. +
  11308. + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
  11309. + epDescr = &(TxCtrlEPList[i]);
  11310. + CHECK_ALIGN(epDescr);
  11311. + epDescr->hw_len = 0;
  11312. + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
  11313. + epDescr->sub = 0;
  11314. + epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
  11315. + }
  11316. + /* Special handling of last descr in list, make list circular */
  11317. + epDescr = &TxCtrlEPList[i];
  11318. + CHECK_ALIGN(epDescr);
  11319. + epDescr->hw_len = 0;
  11320. + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
  11321. + IO_FIELD(USB_EP_command, epid, i);
  11322. + epDescr->sub = 0;
  11323. + epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
  11324. +
  11325. + /* Init DMA sub-channel pointers to last item in each list */
  11326. + *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
  11327. + /* No point in starting the ctrl channel yet.
  11328. + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
  11329. +}
  11330. +
  11331. +
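+/* Set up the circular Intr EP descriptor list with one eof-marked slot per
+ frame (MAX_INTR_INTERVAL slots), all initially pointing at a shared zout
+ SB, and connect DMA channel 8 sub-channel 2 to the first slot. */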
  11332. +static void tc_dma_init_tx_intr_list(void) {
  11333. + int i;
  11334. +
  11335. + TxIntrSB_zout.sw_len = 1;
  11336. + TxIntrSB_zout.next = 0;
  11337. + TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
  11338. + TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
  11339. + IO_STATE(USB_SB_command, tt, zout) |
  11340. + IO_STATE(USB_SB_command, full, yes) |
  11341. + IO_STATE(USB_SB_command, eot, yes) |
  11342. + IO_STATE(USB_SB_command, eol, yes));
  11343. +
  11344. + for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
  11345. + CHECK_ALIGN(&TxIntrEPList[i]);
  11346. + TxIntrEPList[i].hw_len = 0;
  11347. + TxIntrEPList[i].command =
  11348. + (IO_STATE(USB_EP_command, eof, yes) |
  11349. + IO_STATE(USB_EP_command, enable, yes) |
  11350. + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  11351. + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
  11352. + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
  11353. + }
  11354. +
  11355. + /* Special handling of last descr in list, make list circular */
  11356. + CHECK_ALIGN(&TxIntrEPList[i]);
  11357. + TxIntrEPList[i].hw_len = 0;
  11358. + TxIntrEPList[i].command =
  11359. + (IO_STATE(USB_EP_command, eof, yes) |
  11360. + IO_STATE(USB_EP_command, eol, yes) |
  11361. + IO_STATE(USB_EP_command, enable, yes) |
  11362. + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  11363. + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
  11364. + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
  11365. +
  11366. + intr_dbg("Initiated Intr EP descriptor list\n");
  11367. +
  11368. +
  11369. + /* Connect DMA 8 sub-channel 2 to first in list */
  11370. + *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
  11371. +}
  11372. +
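+/* Set up the Isoc EP descriptor list, one disabled EP per epid plus an
+ enabled dummy EP at the end that generates the eof interrupt, and start
+ DMA channel 8 sub-channel 3. */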
  11373. +static void tc_dma_init_tx_isoc_list(void) {
  11374. + int i;
  11375. +
  11376. + DBFENTER;
  11377. +
  11378. + /* Read comment at zout_buffer declaration for an explanation of this. */
  11379. + TxIsocSB_zout.sw_len = 1;
  11380. + TxIsocSB_zout.next = 0;
  11381. + TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
  11382. + TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
  11383. + IO_STATE(USB_SB_command, tt, zout) |
  11384. + IO_STATE(USB_SB_command, full, yes) |
  11385. + IO_STATE(USB_SB_command, eot, yes) |
  11386. + IO_STATE(USB_SB_command, eol, yes));
  11387. +
  11388. + /* The last isochronous EP descriptor is a dummy. */
  11389. + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
  11390. + CHECK_ALIGN(&TxIsocEPList[i]);
  11391. + TxIsocEPList[i].hw_len = 0;
  11392. + TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
  11393. + TxIsocEPList[i].sub = 0;
  11394. + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
  11395. + }
  11396. +
  11397. + CHECK_ALIGN(&TxIsocEPList[i]);
  11398. + TxIsocEPList[i].hw_len = 0;
  11399. +
  11400. + /* Must enable the last EP descr to get eof interrupt. */
  11401. + TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
  11402. + IO_STATE(USB_EP_command, eof, yes) |
  11403. + IO_STATE(USB_EP_command, eol, yes) |
  11404. + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  11405. + TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
  11406. + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
  11407. +
  11408. + *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
  11409. + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
  11410. +}
  11411. +
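+/* Initialize all DMA descriptor lists, claim the USB TX and RX DMA channels,
+ enable their descriptor/eop interrupts and install the RX and TX interrupt
+ handlers. Returns 0 on success or -EBUSY if a resource is unavailable. */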
  11412. +static int tc_dma_init(struct usb_hcd *hcd) {
  11413. + tc_dma_init_rx_list();
  11414. + tc_dma_init_tx_bulk_list();
  11415. + tc_dma_init_tx_ctrl_list();
  11416. + tc_dma_init_tx_intr_list();
  11417. + tc_dma_init_tx_isoc_list();
  11418. +
  11419. + if (cris_request_dma(USB_TX_DMA_NBR,
  11420. + "ETRAX 100LX built-in USB (Tx)",
  11421. + DMA_VERBOSE_ON_ERROR,
  11422. + dma_usb)) {
  11423. + err("Could not allocate DMA ch 8 for USB");
  11424. + return -EBUSY;
  11425. + }
  11426. +
  11427. + if (cris_request_dma(USB_RX_DMA_NBR,
  11428. + "ETRAX 100LX built-in USB (Rx)",
  11429. + DMA_VERBOSE_ON_ERROR,
  11430. + dma_usb)) {
  11431. + err("Could not allocate DMA ch 9 for USB");
  11432. + return -EBUSY;
  11433. + }
  11434. +
  11435. + *R_IRQ_MASK2_SET =
  11436. + /* Note that these interrupts are not used. */
  11437. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
  11438. + /* Sub channel 1 (ctrl) descr. interrupts are used. */
  11439. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
  11440. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
  11441. + /* Sub channel 3 (isoc) descr. interrupts are used. */
  11442. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
  11443. +
  11444. + /* Note that the dma9_descr interrupt is not used. */
  11445. + *R_IRQ_MASK2_SET =
  11446. + IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
  11447. + IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
  11448. +
  11449. + if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
  11450. + "ETRAX 100LX built-in USB (Rx)", hcd)) {
  11451. + err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
  11452. + return -EBUSY;
  11453. + }
  11454. +
  11455. + if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
  11456. + "ETRAX 100LX built-in USB (Tx)", hcd)) {
  11457. + err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
  11458. + return -EBUSY;
  11459. + }
  11460. +
  11461. + return 0;
  11462. +}
  11463. +
  11464. +static void tc_dma_destroy(void) {
  11465. + free_irq(ETRAX_USB_RX_IRQ, NULL);
  11466. + free_irq(ETRAX_USB_TX_IRQ, NULL);
  11467. +
  11468. + cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
  11469. + cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
  11470. +
  11471. +}
  11472. +
  11473. +static void tc_dma_link_intr_urb(struct urb *urb);
  11474. +
  11475. +/* Handle processing of Bulk, Ctrl and Intr queues */
  11476. +static void tc_dma_process_queue(int epid) {
  11477. + struct urb *urb;
  11478. + struct crisv10_urb_priv *urb_priv;
  11479. + unsigned long flags;
  11480. + char toggle;
  11481. +
  11482. + if(epid_state[epid].disabled) {
  11483. + /* Don't process any URBs on a disabled endpoint */
  11484. + return;
  11485. + }
  11486. +
  11487. + /* Do not disturb us while fiddling with EPs and epids */
  11488. + local_irq_save(flags);
  11489. +
  11490. + /* For Bulk, Ctrl and Intr we can only have one URB active at a time for
  11491. + a specific EP. */
  11492. + if(activeUrbList[epid] != NULL) {
  11493. + /* An URB is already active on EP, skip checking queue */
  11494. + local_irq_restore(flags);
  11495. + return;
  11496. + }
  11497. +
  11498. + urb = urb_list_first(epid);
  11499. + if(urb == NULL) {
  11500. + /* No URB waiting in EP queue. Nothing to do */
  11501. + local_irq_restore(flags);
  11502. + return;
  11503. + }
  11504. +
  11505. + urb_priv = urb->hcpriv;
  11506. + ASSERT(urb_priv != NULL);
  11507. + ASSERT(urb_priv->urb_state == NOT_STARTED);
  11508. + ASSERT(!usb_pipeisoc(urb->pipe));
  11509. +
  11510. + /* Remove this URB from the queue and move it to active */
  11511. + activeUrbList[epid] = urb;
  11512. + urb_list_del(urb, epid);
  11513. +
  11514. + urb_priv->urb_state = STARTED;
  11515. +
  11516. + /* Reset error counters (regardless of which direction this traffic is). */
  11517. + etrax_epid_clear_error(epid);
  11518. +
  11519. + /* Special handling of Intr EP lists */
  11520. + if(usb_pipeint(urb->pipe)) {
  11521. + tc_dma_link_intr_urb(urb);
  11522. + local_irq_restore(flags);
  11523. + return;
  11524. + }
  11525. +
  11526. + /* Software must preset the toggle bits for Bulk and Ctrl */
  11527. + if(usb_pipecontrol(urb->pipe)) {
  11528. + /* Toggle bits are initialized only during setup transaction in a
  11529. + CTRL transfer */
  11530. + etrax_epid_set_toggle(epid, 0, 0);
  11531. + etrax_epid_set_toggle(epid, 1, 0);
  11532. + } else {
  11533. + toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
  11534. + usb_pipeout(urb->pipe));
  11535. + etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
  11536. + }
  11537. +
  11538. + tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
  11539. + (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
  11540. + sblist_to_str(urb_priv->first_sb));
  11541. +
  11542. + /* We start the DMA sub channel without checking if it's running or not,
  11543. + because:
  11544. + 1) If it's already running, issuing the start command is a nop.
  11545. + 2) We avoid a test-and-set race condition. */
  11546. + switch(usb_pipetype(urb->pipe)) {
  11547. + case PIPE_BULK:
  11548. + /* Assert that the EP descriptor is disabled. */
  11549. + ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
  11550. +
  11551. + /* Set up and enable the EP descriptor. */
  11552. + TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
  11553. + TxBulkEPList[epid].hw_len = 0;
  11554. + TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  11555. +
  11556. + /* Check if the dummy list is already with us (if several urbs were queued). */
  11557. + if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
  11558. + tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
  11559. + (unsigned long)urb, epid);
  11560. +
  11561. + /* We don't need to check if the DMA is at this EP or not before changing the
  11562. + next pointer, since we will do it in one 32-bit write (EP descriptors are
  11563. + 32-bit aligned). */
  11564. + TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
  11565. + }
  11566. +
  11567. + restart_dma8_sub0();
  11568. +
  11569. + /* Update/restart the bulk start timer since we just started the channel.*/
  11570. + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
  11571. + /* Update/restart the bulk eot timer since we just inserted traffic. */
  11572. + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
  11573. + break;
  11574. + case PIPE_CONTROL:
  11575. + /* Assert that the EP descriptor is disabled. */
  11576. + ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
  11577. +
  11578. + /* Set up and enable the EP descriptor. */
  11579. + TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
  11580. + TxCtrlEPList[epid].hw_len = 0;
  11581. + TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  11582. +
  11583. + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
  11584. + break;
  11585. + }
  11586. + local_irq_restore(flags);
  11587. +}
  11588. +
  11589. +static void tc_dma_link_intr_urb(struct urb *urb) {
  11590. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  11591. + volatile struct USB_EP_Desc *tmp_ep;
  11592. + struct USB_EP_Desc *ep_desc;
  11593. + int i = 0, epid;
  11594. + int pool_idx = 0;
  11595. +
  11596. + ASSERT(urb_priv != NULL);
  11597. + epid = urb_priv->epid;
  11598. + ASSERT(urb_priv->interval > 0);
  11599. + ASSERT(urb_priv->intr_ep_pool_length > 0);
  11600. +
  11601. + tmp_ep = &TxIntrEPList[0];
  11602. +
  11603. + /* Only insert one EP descriptor in the list for Out Intr URBs.
  11604. + We can only handle Out Intr with an interval of 128 ms, because it is
  11605. + not possible to insert several Out Intr EPs; they are not consumed
  11606. + by the DMA. */
  11607. + if(usb_pipeout(urb->pipe)) {
  11608. + ep_desc = urb_priv->intr_ep_pool[0];
  11609. + ASSERT(ep_desc);
  11610. + ep_desc->next = tmp_ep->next;
  11611. + tmp_ep->next = virt_to_phys(ep_desc);
  11612. + i++;
  11613. + } else {
  11614. + /* Loop through Intr EP descriptor list and insert EP for URB at
  11615. + specified interval */
  11616. + do {
  11617. + /* Each EP descriptor with the eof flag set signals a new frame */
  11618. + if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
  11619. + /* Insert an EP from the URB's EP pool at the correct interval */
  11620. + if ((i % urb_priv->interval) == 0) {
  11621. + ep_desc = urb_priv->intr_ep_pool[pool_idx];
  11622. + ASSERT(ep_desc);
  11623. + ep_desc->next = tmp_ep->next;
  11624. + tmp_ep->next = virt_to_phys(ep_desc);
  11625. + pool_idx++;
  11626. + ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
  11627. + }
  11628. + i++;
  11629. + }
  11630. + tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
  11631. + } while(tmp_ep != &TxIntrEPList[0]);
  11632. + }
  11633. +
  11634. + intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
  11635. + sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
  11636. +
  11637. + /* We start the DMA sub channel without checking if it's running or not,
  11638. + because:
  11639. + 1) If it's already running, issuing the start command is a nop.
  11640. + 2) We avoid a test-and-set race condition. */
  11641. + *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
  11642. +}
  11643. +
  11644. + /* hinko ignore usb_pipeisoc */
  11645. +#if 0
  11646. +static void tc_dma_process_isoc_urb(struct urb *urb) {
  11647. + unsigned long flags;
  11648. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  11649. + int epid;
  11650. +
  11651. + /* Do not disturb us while fiddling with EPs and epids */
  11652. + local_irq_save(flags);
  11653. +
  11654. + ASSERT(urb_priv);
  11655. + ASSERT(urb_priv->first_sb);
  11656. + epid = urb_priv->epid;
  11657. +
  11658. + if(activeUrbList[epid] == NULL) {
  11659. + /* EP is idle, so make this URB active */
  11660. + activeUrbList[epid] = urb;
  11661. + urb_list_del(urb, epid);
  11662. + ASSERT(TxIsocEPList[epid].sub == 0);
  11663. + ASSERT(!(TxIsocEPList[epid].command &
  11664. + IO_STATE(USB_EP_command, enable, yes)));
  11665. +
  11666. + /* Differentiate between In and Out Isoc, because In SBs are not consumed. */
  11667. + if(usb_pipein(urb->pipe)) {
  11668. + /* Each EP for In Isoc will have only one SB descriptor, set up when
  11669. + submitting the first active urb. We do it here by copying from the URB's
  11670. + pre-allocated SB. */
  11671. + memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
  11672. + sizeof(TxIsocSBList[epid]));
  11673. + TxIsocEPList[epid].hw_len = 0;
  11674. + TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
  11675. + } else {
  11676. + /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
  11677. + TxIsocEPList[epid].hw_len = 0;
  11678. + TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
  11679. +
  11680. + isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
  11681. + " last_sb::0x%x\n",
  11682. + (unsigned int)urb, urb_priv->urb_num, epid,
  11683. + (unsigned int)(urb_priv->first_sb),
  11684. + (unsigned int)(urb_priv->last_sb));
  11685. + }
  11686. +
  11687. + if (urb->transfer_flags & URB_ISO_ASAP) {
  11688. + /* The isoc transfer should be started as soon as possible. The
  11689. + start_frame field is a return value if URB_ISO_ASAP was set. Comparing
  11690. + R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
  11691. + token is sent 2 frames later. I'm not sure how this affects usage of
  11692. + the start_frame field by the device driver, or how it affects things
  11693. + when USB_ISO_ASAP is not set, so therefore there's no compensation for
  11694. + the 2 frame "lag" here. */
  11695. + urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
  11696. + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  11697. + urb_priv->urb_state = STARTED;
  11698. + isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
  11699. + urb->start_frame);
  11700. + } else {
  11701. + /* Not started yet. */
  11702. + urb_priv->urb_state = NOT_STARTED;
  11703. + isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
  11704. + (unsigned int)urb);
  11705. + }
  11706. +
  11707. + } else {
  11708. + /* An URB is already active on the EP. Leave URB in queue and let
  11709. + finish_isoc_urb process it after current active URB */
  11710. + ASSERT(TxIsocEPList[epid].sub != 0);
  11711. +
  11712. + if(usb_pipein(urb->pipe)) {
  11713. + /* Because there already is an active In URB on this epid we do nothing
  11714. + and the finish_isoc_urb() function will handle switching to the next URB */
  11715. +
  11716. + } else { /* For Out Isoc, insert the new URB's traffic last in the SB list. */
  11717. + struct USB_SB_Desc *temp_sb_desc;
  11718. +
  11719. + /* Set state STARTED for all Out Isoc URBs added to the SB list, because we
  11720. + don't know how many of them are finished before the descriptor interrupt */
  11721. + urb_priv->urb_state = STARTED;
  11722. +
  11723. + /* Find the end of the current SB list by looking for the SB with the eol flag set */
  11724. + temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
  11725. + while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
  11726. + IO_STATE(USB_SB_command, eol, yes)) {
  11727. + ASSERT(temp_sb_desc->next);
  11728. + temp_sb_desc = phys_to_virt(temp_sb_desc->next);
  11729. + }
  11730. +
  11731. + isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
  11732. + " sub:0x%x eol:0x%x\n",
  11733. + (unsigned int)urb, urb_priv->urb_num,
  11734. + (unsigned int)(urb_priv->first_sb),
  11735. + (unsigned int)(urb_priv->last_sb), epid,
  11736. + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
  11737. + (unsigned int)temp_sb_desc);
  11738. +
  11739. + /* Next pointer must be set before eol is removed. */
  11740. + temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
  11741. + /* Clear the previous end of list flag since there is a new one in the
  11742. + added SB descriptor list. */
  11743. + temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
  11744. +
  11745. + if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
  11746. + __u32 epid_data;
  11747. + /* 8.8.5 in Designer's Reference says we should check for and correct
  11748. + any errors in the EP here. That should not be necessary if
  11749. + epid_attn is handled correctly, so we assume all is ok. */
  11750. + epid_data = etrax_epid_iso_get(epid);
  11751. + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
  11752. + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
  11753. + isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
  11754. + " URB:0x%x[%d]\n",
  11755. + IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
  11756. + (unsigned int)urb, urb_priv->urb_num);
  11757. + }
  11758. +
  11759. + /* The SB list was exhausted. */
  11760. + if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
  11761. + /* The new sublist did not get processed before the EP was
  11762. + disabled. Setup the EP again. */
  11763. +
  11764. + if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
  11765. + isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
  11766. + ", restarting from this URBs SB:0x%x\n",
  11767. + epid, (unsigned int)temp_sb_desc,
  11768. + (unsigned int)(urb_priv->first_sb));
  11769. + TxIsocEPList[epid].hw_len = 0;
  11770. + TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
  11771. + urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
  11772. + /* Enable the EP again so data gets processed this time */
  11773. + TxIsocEPList[epid].command |=
  11774. + IO_STATE(USB_EP_command, enable, yes);
  11775. +
  11776. + } else {
  11777. + /* The EP has been disabled but not at the end of this URB (god knows
  11778. + where). This should generate an epid_attn, so we should not be
  11779. + here */
  11780. + isoc_warn("EP was disabled on sb:0x%x before SB list for"
  11781. + " URB:0x%x[%d] got processed\n",
  11782. + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
  11783. + (unsigned int)urb, urb_priv->urb_num);
  11784. + }
  11785. + } else {
  11786. + /* This might happen if we are slow in this function; it is not
  11787. + an error. */
  11788. + isoc_dbg("EP was disabled and finished with SBs from appended"
  11789. + " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
  11790. + }
  11791. + }
  11792. + }
  11793. + }
  11794. +
  11795. + /* Start the DMA sub channel */
  11796. + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
  11797. +
  11798. + local_irq_restore(flags);
  11799. +}
  11800. +#endif
  11801. +
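+/* Remove all Intr EP descriptors belonging to an URB's epid from the
+ TxIntrEPList: first disable them, then unlink each one and wait for the
+ DMA to leave the descriptor before moving on. */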
  11802. +static void tc_dma_unlink_intr_urb(struct urb *urb) {
  11803. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  11804. + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  11805. + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  11806. + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
  11807. + volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
  11808. + the list. */
  11809. + int count = 0;
  11810. + volatile int timeout = 10000;
  11811. + int epid;
  11812. +
  11813. + /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
  11814. + List". */
  11815. + ASSERT(urb_priv);
  11816. + ASSERT(urb_priv->intr_ep_pool_length > 0);
  11817. + epid = urb_priv->epid;
  11818. +
  11819. + /* First disable all Intr EPs belonging to epid for this URB */
  11820. + first_ep = &TxIntrEPList[0];
  11821. + curr_ep = first_ep;
  11822. + do {
  11823. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  11824. + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
  11825. + /* Disable EP */
  11826. + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
  11827. + }
  11828. + curr_ep = phys_to_virt(curr_ep->next);
  11829. + } while (curr_ep != first_ep);
  11830. +
  11831. +
  11832. + /* Now unlink all EPs belonging to this epid from Descr list */
  11833. + first_ep = &TxIntrEPList[0];
  11834. + curr_ep = first_ep;
  11835. + do {
  11836. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  11837. + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
  11838. + /* This is the one we should unlink. */
  11839. + unlink_ep = next_ep;
  11840. +
  11841. + /* Actually unlink the EP from the DMA list. */
  11842. + curr_ep->next = unlink_ep->next;
  11843. +
  11844. + /* Wait until the DMA is no longer at this descriptor. */
  11845. + while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
  11846. + (timeout-- > 0));
  11847. + if(timeout <= 0) {
  11848. + warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
  11849. + }
  11850. +
  11851. + count++;
  11852. + }
  11853. + curr_ep = phys_to_virt(curr_ep->next);
  11854. + } while (curr_ep != first_ep);
  11855. +
  11856. + if(count != urb_priv->intr_ep_pool_length) {
  11857. + intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
  11858. + urb_priv->intr_ep_pool_length, (unsigned int)urb,
  11859. + urb_priv->urb_num);
  11860. + } else {
  11861. + intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
  11862. + urb_priv->intr_ep_pool_length, (unsigned int)urb);
  11863. + }
  11864. +}
  11865. +
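+/* Scan the Bulk EP list for disabled descriptors with a valid sub pointer,
+ i.e. finished Out Bulk transfers, and complete their active URBs. The timer
+ argument indicates whether we were called because of a timeout rather than
+ from the TX interrupt. */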
  11866. +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
  11867. + int timer) {
  11868. + unsigned long flags;
  11869. + int epid;
  11870. + struct urb *urb;
  11871. + struct crisv10_urb_priv * urb_priv;
  11872. + __u32 epid_data;
  11873. +
  11874. + /* Protect TxEPList */
  11875. + local_irq_save(flags);
  11876. +
  11877. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  11878. + /* A finished EP descriptor is disabled and has a valid sub pointer */
  11879. + if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
  11880. + (TxBulkEPList[epid].sub != 0)) {
  11881. +
  11882. + /* Get the active URB for this epid */
  11883. + urb = activeUrbList[epid];
  11884. + /* Sanity checks */
  11885. + ASSERT(urb);
  11886. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  11887. + ASSERT(urb_priv);
  11888. +
  11889. + /* Only handle finished out Bulk EPs here,
  11890. + and let RX interrupt take care of the rest */
  11891. + if(!epid_out_traffic(epid)) {
  11892. + continue;
  11893. + }
  11894. +
  11895. + if(timer) {
  11896. + tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
  11897. + epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
  11898. + urb_priv->urb_num);
  11899. + } else {
  11900. + tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
  11901. + epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
  11902. + urb_priv->urb_num);
  11903. + }
  11904. +
  11905. + if(urb_priv->urb_state == UNLINK) {
  11906. + /* This Bulk URB has been requested to be unlinked, which means that the EP
  11907. + has been disabled and we might not have sent all data */
  11908. + tc_finish_urb(hcd, urb, urb->status);
  11909. + continue;
  11910. + }
  11911. +
  11912. + ASSERT(urb_priv->urb_state == STARTED);
  11913. + if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
  11914. + tc_err("Endpoint got disabled before reaching last sb\n");
  11915. + }
  11916. +
  11917. + epid_data = etrax_epid_get(epid);
  11918. + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
  11919. + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
  11920. + /* This means that the endpoint has no error, is disabled
  11921. + and had inserted traffic, i.e. transfer successfully completed. */
  11922. + tc_finish_urb(hcd, urb, 0);
  11923. + } else {
  11924. + /* Shouldn't happen. We expect errors to be caught by epid
  11925. + attention. */
  11926. + tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
  11927. + epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
  11928. + }
  11929. + } else {
  11930. + tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
  11931. + }
  11932. + }
  11933. +
  11934. + local_irq_restore(flags);
  11935. +}
  11936. +
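+/* Scan the Ctrl EP list for disabled descriptors with a valid sub pointer
+ and finish the corresponding In Ctrl URBs, unless the RX interrupt has not
+ delivered the data yet, in which case ctrl_zout_done is flagged and the RX
+ interrupt finishes the URB instead. */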
  11937. +static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
  11938. + unsigned long flags;
  11939. + int epid;
  11940. + struct urb *urb;
  11941. + struct crisv10_urb_priv * urb_priv;
  11942. + __u32 epid_data;
  11943. +
  11944. + /* Protect TxEPList */
  11945. + local_irq_save(flags);
  11946. +
  11947. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  11948. + if(epid == DUMMY_EPID)
  11949. + continue;
  11950. +
  11951. + /* A finished EP descriptor is disabled and has a valid sub pointer */
  11952. + if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
  11953. + (TxCtrlEPList[epid].sub != 0)) {
  11954. +
  11955. + /* Get the active URB for this epid */
  11956. + urb = activeUrbList[epid];
  11957. +
  11958. + if(urb == NULL) {
  11959. + tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
  11960. + continue;
  11961. + }
  11962. +
  11963. + /* Sanity checks */
  11964. + ASSERT(usb_pipein(urb->pipe));
  11965. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  11966. + ASSERT(urb_priv);
  11967. + if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
  11968. + tc_err("Endpoint got disabled before reaching last sb\n");
  11969. + }
  11970. +
  11971. + epid_data = etrax_epid_get(epid);
  11972. + if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
  11973. + IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
  11974. + /* This means that the endpoint has no error, is disabled
  11975. + and had inserted traffic, i.e. transfer successfully completed. */
  11976. +
  11977. + /* Check if RX-interrupt for In Ctrl has been processed before
  11978. + finishing the URB */
  11979. + if(urb_priv->ctrl_rx_done) {
  11980. + tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
  11981. + (unsigned int)urb, urb_priv->urb_num);
  11982. + tc_finish_urb(hcd, urb, 0);
  11983. + } else {
  11984. + /* If we get the zout descriptor interrupt before RX was done for an
  11985. + In Ctrl transfer, then we flag that and it will be finished
  11986. + in the RX-Interrupt */
  11987. + urb_priv->ctrl_zout_done = 1;
  11988. + tc_dbg("Got zout descr interrupt before RX interrupt\n");
  11989. + }
  11990. + } else {
  11991. + /* Shouldn't happen. We expect errors to be caught by epid
  11992. + attention. */
  11993. + tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
  11994. + __dump_ep_desc(&(TxCtrlEPList[epid]));
  11995. + __dump_ept_data(epid);
  11996. + }
  11997. + }
  11998. + }
  11999. + local_irq_restore(flags);
  12000. +}
  12001. +
  12002. + /* hinko ignore usb_pipeisoc */
  12003. +#if 0
  12004. +/* This function goes through all epids that are set up for Out Isoc transfers
  12005. + and marks (isoc_out_done) all queued URBs that the DMA has finished
  12006. + transferring.
  12007. + No URB completion is done here, to let the interrupt routine return quickly.
  12008. + URBs are completed later with the help of complete_isoc_bottom_half(), which
  12009. + is scheduled when this function finishes. */
  12010. +static void check_finished_isoc_tx_epids(void) {
  12011. + unsigned long flags;
  12012. + int epid;
  12013. + struct urb *urb;
  12014. + struct crisv10_urb_priv * urb_priv;
  12015. + struct USB_SB_Desc* sb_desc;
  12016. + int epid_done;
  12017. +
  12018. + /* Protect TxIsocEPList */
  12019. + local_irq_save(flags);
  12020. +
  12021. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  12022. + if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
  12023. + !epid_out_traffic(epid)) {
  12024. + /* Nothing here to see. */
  12025. + continue;
  12026. + }
  12027. + ASSERT(epid_inuse(epid));
  12028. + ASSERT(epid_isoc(epid));
  12029. +
  12030. + sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
  12031. + /* Find the last descriptor of the currently active URB for this ep.
  12032. + This is the first descriptor in the sub list marked for a descriptor
  12033. + interrupt. */
  12034. + while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
  12035. + sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
  12036. + }
  12037. + ASSERT(sb_desc);
  12038. +
  12039. + isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
  12040. + epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
  12041. + (unsigned int)sb_desc);
  12042. +
  12043. + urb = activeUrbList[epid];
  12044. + if(urb == NULL) {
  12045. + isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
  12046. + continue;
  12047. + }
  12048. +
  12049. + epid_done = 0;
  12050. + while(urb && !epid_done) {
  12051. + /* Sanity check. */
  12052. + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
  12053. + ASSERT(usb_pipeout(urb->pipe));
  12054. +
  12055. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  12056. + ASSERT(urb_priv);
  12057. + ASSERT(urb_priv->urb_state == STARTED ||
  12058. + urb_priv->urb_state == UNLINK);
  12059. +
  12060. + if (sb_desc != urb_priv->last_sb) {
  12061. + /* This urb has been sent. */
  12062. + urb_priv->isoc_out_done = 1;
  12063. +
  12064. + } else { /* Found URB that has last_sb as the interrupt reason */
  12065. +
  12066. + /* Check if EP has been disabled, meaning that all transfers are done*/
  12067. + if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
  12068. + ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
  12069. + IO_STATE(USB_SB_command, eol, yes));
  12070. + ASSERT(sb_desc->next == 0);
  12071. + urb_priv->isoc_out_done = 1;
  12072. + } else {
  12073. + isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
  12074. + (unsigned int)urb, urb_priv->urb_num);
  12075. + }
  12076. + /* Stop looking any further in queue */
  12077. + epid_done = 1;
  12078. + }
  12079. +
  12080. + if (!epid_done) {
  12081. + if(urb == activeUrbList[epid]) {
  12082. + urb = urb_list_first(epid);
  12083. + } else {
  12084. + urb = urb_list_next(urb, epid);
  12085. + }
  12086. + }
  12087. + } /* END: while(urb && !epid_done) */
  12088. + }
  12089. +
  12090. + local_irq_restore(flags);
  12091. +}
  12092. +
  12093. +
  12094. +/* This is where the Out Isoc URBs are really completed. This function is
  12095. + scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
  12096. + are done. This function completes all URBs earlier marked with
  12097. + isoc_out_done by the fast interrupt routine check_finished_isoc_tx_epids() */
  12098. +
  12099. +static void complete_isoc_bottom_half(void *data) {
  12100. + struct crisv10_isoc_complete_data *comp_data;
  12101. + struct usb_iso_packet_descriptor *packet;
  12102. + struct crisv10_urb_priv * urb_priv;
  12103. + unsigned long flags;
  12104. + struct urb* urb;
  12105. + int epid_done;
  12106. + int epid;
  12107. + int i;
  12108. +
  12109. + comp_data = (struct crisv10_isoc_complete_data*)data;
  12110. +
  12111. + local_irq_save(flags);
  12112. +
  12113. + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
  12114. + if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
  12115. + /* Only check valid Out Isoc epids */
  12116. + continue;
  12117. + }
  12118. +
  12119. + isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
  12120. + (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
  12121. +
  12122. + /* The descriptor interrupt handler has marked all transmitted Out Isoc
  12123. + URBs with isoc_out_done. Now we traverse all epids and, for each one that
  12124. + has Out Isoc traffic, we traverse its URB list and complete the
  12125. + transmitted URBs. */
  12126. + epid_done = 0;
  12127. + while (!epid_done) {
  12128. +
  12129. + /* Get the active urb (if any) */
  12130. + urb = activeUrbList[epid];
  12131. + if (urb == 0) {
  12132. + isoc_dbg("No active URB on epid:%d anymore\n", epid);
  12133. + epid_done = 1;
  12134. + continue;
  12135. + }
  12136. +
  12137. + /* Sanity check. */
  12138. + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
  12139. + ASSERT(usb_pipeout(urb->pipe));
  12140. +
  12141. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  12142. + ASSERT(urb_priv);
  12143. +
  12144. + if (!(urb_priv->isoc_out_done)) {
  12145. + /* We have reached a URB that isn't flagged done yet; stop traversing. */
  12146. + isoc_dbg("Stopped traversing Out Isoc URBs on epid:%d"
  12147. + " before not yet flagged URB:0x%x[%d]\n",
  12148. + epid, (unsigned int)urb, urb_priv->urb_num);
  12149. + epid_done = 1;
  12150. + continue;
  12151. + }
  12152. +
  12153. + /* This urb has been sent. */
  12154. + isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
  12155. + (unsigned int)urb, urb_priv->urb_num);
  12156. +
  12157. + /* Set ok on transferred packets for this URB and finish it */
  12158. + for (i = 0; i < urb->number_of_packets; i++) {
  12159. + packet = &urb->iso_frame_desc[i];
  12160. + packet->status = 0;
  12161. + packet->actual_length = packet->length;
  12162. + }
  12163. + urb_priv->isoc_packet_counter = urb->number_of_packets;
  12164. + tc_finish_urb(comp_data->hcd, urb, 0);
  12165. +
  12166. + } /* END: while(!epid_done) */
  12167. + } /* END: for(epid...) */
  12168. +
  12169. + local_irq_restore(flags);
  12170. + kmem_cache_free(isoc_compl_cache, comp_data);
  12171. +}
  12172. +#endif
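The Out Isoc completion path above is compiled out, and complete_isoc_bottom_half() still expects a void* context even though tc_dma_tx_interrupt() below queues it with the two-argument INIT_WORK(), under which the callback only receives a struct work_struct pointer. If this path were ever revived, the bottom half would have to recover its context with container_of(); the following is only a sketch of that adaptation, not part of the patch:

/* Sketch: the prototype complete_isoc_bottom_half() would need under the
   two-argument INIT_WORK() API. The callback gets the embedded work_struct
   and recovers its crisv10_isoc_complete_data with container_of(). */
static void complete_isoc_bottom_half(struct work_struct *work)
{
	struct crisv10_isoc_complete_data *comp_data =
		container_of(work, struct crisv10_isoc_complete_data, usb_bh);

	/* ... walk the Out Isoc epids and finish URBs flagged isoc_out_done
	   using comp_data->hcd, exactly as the #if 0 body above does ... */

	kmem_cache_free(isoc_compl_cache, comp_data);
}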
  12173. +
  12174. +static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
  12175. + unsigned long flags;
  12176. + int epid;
  12177. + struct urb *urb;
  12178. + struct crisv10_urb_priv * urb_priv;
  12179. + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  12180. + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
  12181. +
  12182. + /* Protect TxintrEPList */
  12183. + local_irq_save(flags);
  12184. +
  12185. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  12186. + if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
  12187. + /* Nothing to see on this epid. Only check valid Out Intr epids */
  12188. + continue;
  12189. + }
  12190. +
  12191. + urb = activeUrbList[epid];
  12192. + if(urb == 0) {
  12193. + intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
  12194. + continue;
  12195. + }
  12196. +
  12197. + /* Sanity check. */
  12198. + ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
  12199. + ASSERT(usb_pipeout(urb->pipe));
  12200. +
  12201. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  12202. + ASSERT(urb_priv);
  12203. +
  12204. + /* Go through the EPs between the first and second sof-EP; this is where
  12205. + Out Intr EPs are inserted. */
  12206. + curr_ep = &TxIntrEPList[0];
  12207. + do {
  12208. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  12209. + if(next_ep == urb_priv->intr_ep_pool[0]) {
  12210. + /* We found the Out Intr EP for this epid */
  12211. +
  12212. + /* Disable it so it doesn't get processed again */
  12213. + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
  12214. +
  12215. + /* Finish the active Out Intr URB with status OK */
  12216. + tc_finish_urb(hcd, urb, 0);
  12217. + }
  12218. + curr_ep = phys_to_virt(curr_ep->next);
  12219. + } while (curr_ep != &TxIntrEPList[1]);
  12220. +
  12221. + }
  12222. + local_irq_restore(flags);
  12223. +}
  12224. +
  12225. +/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
  12226. +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
  12227. + struct usb_hcd *hcd = (struct usb_hcd*)vhc;
  12228. + ASSERT(hcd);
  12229. +
  12230. + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
  12231. + /* Clear this interrupt */
  12232. + *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
  12233. + restart_dma8_sub0();
  12234. + }
  12235. +
  12236. + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
  12237. + /* Clear this interrupt */
  12238. + *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
  12239. + check_finished_ctrl_tx_epids(hcd);
  12240. + }
  12241. +
  12242. + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
  12243. + /* Clear this interrupt */
  12244. + *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
  12245. + check_finished_intr_tx_epids(hcd);
  12246. + }
  12247. +
  12248. + /* hinko ignore usb_pipeisoc */
  12249. +#if 0
  12250. + if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
  12251. + struct crisv10_isoc_complete_data* comp_data;
  12252. +
  12253. + /* Flag done Out Isoc for later completion */
  12254. + check_finished_isoc_tx_epids();
  12255. +
  12256. + /* Clear this interrupt */
  12257. + *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
  12258. + /* Schedule bottom half of Out Isoc completion function. This function
  12259. + finishes the URBs marked with isoc_out_done */
  12260. + comp_data = (struct crisv10_isoc_complete_data*)
  12261. + kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
  12262. + ASSERT(comp_data != NULL);
  12263. + comp_data ->hcd = hcd;
  12264. +
  12265. + //INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half, comp_data);
  12266. + INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half);
  12267. + schedule_work(&comp_data->usb_bh);
  12268. + }
  12269. +#endif
  12270. +
  12271. + return IRQ_HANDLED;
  12272. +}
  12273. +
  12274. +/* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
  12275. +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
  12276. + unsigned long flags;
  12277. + struct urb *urb;
  12278. + struct usb_hcd *hcd = (struct usb_hcd*)vhc;
  12279. + struct crisv10_urb_priv *urb_priv;
  12280. + int epid = 0;
  12281. + int real_error;
  12282. +
  12283. + ASSERT(hcd);
  12284. +
  12285. + /* Clear this interrupt. */
  12286. + *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
  12287. +
  12288. + /* Custom clear interrupt for this interrupt */
  12289. + /* The reason we cli here is that we call the driver's callback functions. */
  12290. + local_irq_save(flags);
  12291. +
  12292. + /* Note that this while loop assumes that all packets span only
  12293. + one rx descriptor. */
  12294. + while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
  12295. + epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
  12296. + /* Get the active URB for this epid */
  12297. + urb = activeUrbList[epid];
  12298. +
  12299. + ASSERT(epid_inuse(epid));
  12300. + if (!urb) {
  12301. + dma_err("No urb for epid %d in rx interrupt\n", epid);
  12302. + goto skip_out;
  12303. + }
  12304. +
  12305. + /* Check if any errors on epid */
  12306. + real_error = 0;
  12307. + if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
  12308. + __u32 r_usb_ept_data;
  12309. +
  12310. + if (usb_pipeisoc(urb->pipe)) {
  12311. + r_usb_ept_data = etrax_epid_iso_get(epid);
  12312. + if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
  12313. + (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
  12314. + (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
  12315. + /* Not an error, just a failure to receive an expected iso
  12316. + in packet in this frame. This is not documented
  12317. + in the designer's reference. Continue processing.
  12318. + */
  12319. + } else real_error = 1;
  12320. + } else real_error = 1;
  12321. + }
  12322. +
  12323. + if(real_error) {
  12324. + dma_err("Error in RX descr on epid:%d for URB 0x%x",
  12325. + epid, (unsigned int)urb);
  12326. + dump_ept_data(epid);
  12327. + dump_in_desc(myNextRxDesc);
  12328. + goto skip_out;
  12329. + }
  12330. +
  12331. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  12332. + ASSERT(urb_priv);
  12333. + ASSERT(urb_priv->urb_state == STARTED ||
  12334. + urb_priv->urb_state == UNLINK);
  12335. +
  12336. + if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
  12337. + (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
  12338. + (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
  12339. +
  12340. + /* We get nodata for empty data transactions, and the rx descriptor's
  12341. + hw_len field is not valid in that case. No data to copy in other
  12342. + words. */
  12343. + if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
  12344. + /* No data to copy */
  12345. + } else {
  12346. + /*
  12347. + dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
  12348. + (unsigned int)urb, epid, myNextRxDesc->hw_len,
  12349. + urb_priv->rx_offset);
  12350. + */
  12351. + /* Only copy data if URB isn't flagged to be unlinked */
  12352. + if(urb_priv->urb_state != UNLINK) {
  12353. + /* Make sure the data fits in the buffer. */
  12354. + if(urb_priv->rx_offset + myNextRxDesc->hw_len
  12355. + <= urb->transfer_buffer_length) {
  12356. +
  12357. + /* Copy the data to URBs buffer */
  12358. + memcpy(urb->transfer_buffer + urb_priv->rx_offset,
  12359. + phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
  12360. + urb_priv->rx_offset += myNextRxDesc->hw_len;
  12361. + } else {
  12362. + /* Signal overflow when returning URB */
  12363. + urb->status = -EOVERFLOW;
  12364. + tc_finish_urb_later(hcd, urb, urb->status);
  12365. + }
  12366. + }
  12367. + }
  12368. +
  12369. + /* Check if it was the last packet in the transfer */
  12370. + if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
  12371. + /* Special handling for In Ctrl URBs. */
  12372. + if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
  12373. + !(urb_priv->ctrl_zout_done)) {
  12374. + /* Flag that the RX part of the Ctrl transfer is done. Because the zout
  12375. + descriptor interrupt hasn't happened yet, the URB will be finished in
  12376. + the TX interrupt. */
  12377. + urb_priv->ctrl_rx_done = 1;
  12378. + tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
  12379. + " for zout\n", (unsigned int)urb);
  12380. + } else {
  12381. + tc_finish_urb(hcd, urb, 0);
  12382. + }
  12383. + }
  12384. + } else { /* ISOC RX */
  12385. + /*
  12386. + isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
  12387. + epid, (unsigned int)urb);
  12388. + */
  12389. +
  12390. + struct usb_iso_packet_descriptor *packet;
  12391. +
  12392. + if (urb_priv->urb_state == UNLINK) {
  12393. + isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
  12394. + goto skip_out;
  12395. + } else if (urb_priv->urb_state == NOT_STARTED) {
  12396. + isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
  12397. + goto skip_out;
  12398. + }
  12399. +
  12400. + packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
  12401. + ASSERT(packet);
  12402. + packet->status = 0;
  12403. +
  12404. + if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
  12405. + /* We get nodata for empty data transactions, and the rx descriptor's
  12406. + hw_len field is not valid in that case. We copy 0 bytes, however, to
  12407. + stay in sync. */
  12408. + packet->actual_length = 0;
  12409. + } else {
  12410. + packet->actual_length = myNextRxDesc->hw_len;
  12411. + /* Make sure the data fits in the buffer. */
  12412. + ASSERT(packet->actual_length <= packet->length);
  12413. + memcpy(urb->transfer_buffer + packet->offset,
  12414. + phys_to_virt(myNextRxDesc->buf), packet->actual_length);
  12415. + if(packet->actual_length > 0)
  12416. + isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
  12417. + packet->actual_length, urb_priv->isoc_packet_counter,
  12418. + (unsigned int)urb, urb_priv->urb_num);
  12419. + }
  12420. +
  12421. + /* Increment the packet counter. */
  12422. + urb_priv->isoc_packet_counter++;
  12423. +
  12424. + /* Note that we don't care about the eot field in the rx descriptor's
  12425. + status. It will always be set for isoc traffic. */
  12426. + if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
  12427. + /* Complete the urb with status OK. */
  12428. + tc_finish_urb(hcd, urb, 0);
  12429. + }
  12430. + }
  12431. +
  12432. + skip_out:
  12433. + myNextRxDesc->status = 0;
  12434. + myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
  12435. + myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
  12436. + myLastRxDesc = myNextRxDesc;
  12437. + myNextRxDesc = phys_to_virt(myNextRxDesc->next);
  12438. + flush_etrax_cache();
  12439. + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
  12440. + }
  12441. +
  12442. + local_irq_restore(flags);
  12443. +
  12444. + return IRQ_HANDLED;
  12445. +}
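The In Ctrl branch above is one half of a rendezvous between the RX and TX interrupt paths: whichever of the final RX descriptor and the zout descriptor interrupt arrives last performs the completion, coordinated through the ctrl_rx_done/ctrl_zout_done flags described with the urb_priv struct further down. A minimal sketch of that two-flag pattern, with invented names; no extra locking is shown since both callers here run with interrupts disabled:

/* Illustrative only: the two-flag completion rendezvous used for In Ctrl
   transfers. Names are hypothetical, not the driver's. */
static void ctrl_stage_done(volatile unsigned char *mine,
			    volatile unsigned char *other,
			    void (*complete_urb)(void))
{
	if (*other)
		complete_urb();   /* the other stage already finished */
	else
		*mine = 1;        /* otherwise let the other stage complete */
}

/* RX path (data stage done):  ctrl_stage_done(&ctrl_rx_done, &ctrl_zout_done, cb);
   TX path (zout stage done):  ctrl_stage_done(&ctrl_zout_done, &ctrl_rx_done, cb); */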
  12446. +
  12447. +static void tc_bulk_start_timer_func(unsigned long dummy) {
  12448. + /* We might enable an EP descriptor behind the current DMA position when
  12449. + it's about to decide that there is no more bulk traffic and it should
  12450. + stop the bulk channel.
  12451. + Therefore we periodically check if the bulk channel is stopped and there
  12452. + is an enabled bulk EP descriptor, in which case we start the bulk
  12453. + channel. */
  12454. +
  12455. + if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
  12456. + int epid;
  12457. +
  12458. + timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
  12459. +
  12460. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  12461. + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  12462. + timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
  12463. + epid);
  12464. + restart_dma8_sub0();
  12465. +
  12466. + /* Restart the bulk eot timer since we just started the bulk channel.*/
  12467. + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
  12468. +
  12469. + /* No need to search any further. */
  12470. + break;
  12471. + }
  12472. + }
  12473. + } else {
  12474. + timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
  12475. + }
  12476. +}
  12477. +
  12478. +static void tc_bulk_eot_timer_func(unsigned long dummy) {
  12479. + struct usb_hcd *hcd = (struct usb_hcd*)dummy;
  12480. + ASSERT(hcd);
  12481. + /* Because of a race condition in the top half, we might miss a bulk eot.
  12482. + This timer "simulates" a bulk eot if we don't get one for a while,
  12483. + hopefully correcting the situation. */
  12484. + timer_dbg("bulk_eot_timer timed out.\n");
  12485. + check_finished_bulk_tx_epids(hcd, 1);
  12486. +}
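Both timer callbacks above follow the classic pre-timer_setup() kernel timer API: a handler taking an unsigned long cookie, re-armed with mod_timer(). The driver initializes and arms these timers elsewhere in the patch; the sketch below, with an invented helper name, only illustrates how such timers are typically set up and armed under that API:

#include <linux/timer.h>

/* Sketch only: old-style timer setup matching the callbacks above.
   example_arm_bulk_timers() is an invented name, not a driver function. */
static struct timer_list bulk_start_timer;
static struct timer_list bulk_eot_timer;

static void example_arm_bulk_timers(struct usb_hcd *hcd)
{
	/* The callback receives the 'data' cookie as an unsigned long. */
	setup_timer(&bulk_start_timer, tc_bulk_start_timer_func, 0UL);
	setup_timer(&bulk_eot_timer, tc_bulk_eot_timer_func,
		    (unsigned long)hcd);

	mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
	mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
}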
  12487. +
  12488. +
  12489. +/*************************************************************/
  12490. +/*************************************************************/
  12491. +/* Device driver block */
  12492. +/*************************************************************/
  12493. +/*************************************************************/
  12494. +
  12495. +/* Forward declarations for device driver functions */
  12496. +static int devdrv_hcd_probe(struct device *);
  12497. +static int devdrv_hcd_remove(struct device *);
  12498. +#ifdef CONFIG_PM
  12499. +static int devdrv_hcd_suspend(struct device *, u32, u32);
  12500. +static int devdrv_hcd_resume(struct device *, u32);
  12501. +#endif /* CONFIG_PM */
  12502. +
  12503. +/* the device */
  12504. +static struct platform_device *devdrv_hc_platform_device;
  12505. +
  12506. +/* device driver interface */
  12507. +static struct device_driver devdrv_hc_device_driver = {
  12508. + .name = (char *) hc_name,
  12509. + .bus = &platform_bus_type,
  12510. +
  12511. + .probe = devdrv_hcd_probe,
  12512. + .remove = devdrv_hcd_remove,
  12513. +
  12514. +#ifdef CONFIG_PM
  12515. + .suspend = devdrv_hcd_suspend,
  12516. + .resume = devdrv_hcd_resume,
  12517. +#endif /* CONFIG_PM */
  12518. +};
  12519. +
  12520. +/* initialize the host controller and driver */
  12521. +static int __init_or_module devdrv_hcd_probe(struct device *dev)
  12522. +{
  12523. + struct usb_hcd *hcd;
  12524. + struct crisv10_hcd *crisv10_hcd;
  12525. + int retval;
  12526. + int rev_maj, rev_min;
  12527. +
  12528. + /* Check DMA burst length */
  12529. + if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
  12530. + IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
  12531. + devdrv_err("Invalid DMA burst length in Etrax 100LX,"
  12532. + " needs to be 32\n");
  12533. + return -EPERM;
  12534. + }
  12535. +
  12536. + hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev_name(dev));
  12537. + if (!hcd)
  12538. + return -ENOMEM;
  12539. +
  12540. + crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  12541. + spin_lock_init(&crisv10_hcd->lock);
  12542. + crisv10_hcd->num_ports = num_ports();
  12543. + crisv10_hcd->running = 0;
  12544. +
  12545. + dev_set_drvdata(dev, crisv10_hcd);
  12546. +
  12547. + devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
  12548. + ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
  12549. +
  12550. + /* Print out chip version read from registers */
  12551. + rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
  12552. + rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
  12553. + if(rev_min == 0) {
  12554. + devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
  12555. + } else {
  12556. + devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
  12557. + }
  12558. +
  12559. + devdrv_info("Bulk timer interval, start:%d eot:%d\n",
  12560. + BULK_START_TIMER_INTERVAL,
  12561. + BULK_EOT_TIMER_INTERVAL);
  12562. +
  12563. +
  12564. + /* Init root hub data structures */
  12565. + if(rh_init()) {
  12566. + devdrv_err("Failed init data for Root Hub\n");
  12567. + retval = -ENOMEM;
  12568. + goto out;
  12568. + }
  12569. +
  12570. + if(port_in_use(0)) {
  12571. + if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
  12572. + printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
  12573. + retval = -EBUSY;
  12574. + goto out;
  12575. + }
  12576. + devdrv_info("Claimed interface for USB physical port 1\n");
  12577. + }
  12578. + if(port_in_use(1)) {
  12579. + if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
  12580. + /* Free first interface if second failed to be claimed */
  12581. + if(port_in_use(0)) {
  12582. + cris_free_io_interface(if_usb_1);
  12583. + }
  12584. + printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
  12585. + retval = -EBUSY;
  12586. + goto out;
  12587. + }
  12588. + devdrv_info("Claimed interface for USB physical port 2\n");
  12589. + }
  12590. +
  12591. + /* Init transfer controller structs and locks */
  12592. + if((retval = tc_init(hcd)) != 0) {
  12593. + goto out;
  12594. + }
  12595. +
  12596. + /* Attach interrupt functions for DMA and init DMA controller */
  12597. + if((retval = tc_dma_init(hcd)) != 0) {
  12598. + goto out;
  12599. + }
  12600. +
  12601. + /* Attach the top IRQ handler for USB controller interrupts */
  12602. + if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
  12603. + "ETRAX 100LX built-in USB (HC)", hcd)) {
  12604. + err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
  12605. + retval = -EBUSY;
  12606. + goto out;
  12607. + }
  12608. +
  12609. + /* iso_eof is only enabled when isoc traffic is running. */
  12610. + *R_USB_IRQ_MASK_SET =
  12611. + /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
  12612. + IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
  12613. + IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
  12614. + IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
  12615. + IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
  12616. +
  12617. +
  12618. + crisv10_ready_wait();
  12619. + /* Reset the USB interface. */
  12620. + *R_USB_COMMAND =
  12621. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  12622. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  12623. + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  12624. +
  12625. + /* Designer's Reference, pp. 8-10, says we should initialize R_USB_FM_PSTART to
  12626. + 0x2A30 (10800), to guarantee that control traffic gets 10% of the
  12627. + bandwidth, and periodic transfers may allocate the rest (90%).
  12628. + This doesn't work though.
  12629. + The value 11960 is chosen to be just after the SOF token, with a couple
  12630. + of bit times extra for possible bit stuffing. */
  12631. + *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
  12632. +
  12633. + crisv10_ready_wait();
  12634. + /* Configure the USB interface as a host controller. */
  12635. + *R_USB_COMMAND =
  12636. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  12637. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  12638. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
  12639. +
  12640. +
  12641. + /* Check that the controller is not busy before enabling ports */
  12642. + crisv10_ready_wait();
  12643. +
  12644. + /* Enable selected USB ports */
  12645. + if(port_in_use(0)) {
  12646. + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  12647. + } else {
  12648. + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  12649. + }
  12650. + if(port_in_use(1)) {
  12651. + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
  12652. + } else {
  12653. + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
  12654. + }
  12655. +
  12656. + crisv10_ready_wait();
  12657. + /* Start processing of USB traffic. */
  12658. + *R_USB_COMMAND =
  12659. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  12660. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  12661. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
  12662. +
  12663. + /* Do not continue probe initialization until the USB interface is done */
  12664. + crisv10_ready_wait();
  12665. +
  12666. + /* Register our Host Controller to USB Core
  12667. + * Finish the remaining parts of generic HCD initialization: allocate the
  12668. + * buffers of consistent memory, register the bus
  12669. + * and call the driver's reset() and start() routines. */
  12670. + retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
  12671. + if (retval != 0) {
  12672. + devdrv_err("Failed registering HCD driver\n");
  12673. + goto out;
  12674. + }
  12675. +
  12676. + return 0;
  12677. +
  12678. + out:
  12679. + devdrv_hcd_remove(dev);
  12680. + return retval;
  12681. +}
  12682. +
  12683. +
  12684. +/* cleanup after the host controller and driver */
  12685. +static int __init_or_module devdrv_hcd_remove(struct device *dev)
  12686. +{
  12687. + struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
  12688. + struct usb_hcd *hcd;
  12689. +
  12690. + if (!crisv10_hcd)
  12691. + return 0;
  12692. + hcd = crisv10_hcd_to_hcd(crisv10_hcd);
  12693. +
  12694. +
  12695. + /* Stop USB Controller in Etrax 100LX */
  12696. + crisv10_hcd_reset(hcd);
  12697. +
  12698. + usb_remove_hcd(hcd);
  12699. + devdrv_dbg("Removed HCD from USB Core\n");
  12700. +
  12701. + /* Free USB Controller IRQ */
  12702. + free_irq(ETRAX_USB_HC_IRQ, NULL);
  12703. +
  12704. + /* Free resources */
  12705. + tc_dma_destroy();
  12706. + tc_destroy();
  12707. +
  12708. +
  12709. + if(port_in_use(0)) {
  12710. + cris_free_io_interface(if_usb_1);
  12711. + }
  12712. + if(port_in_use(1)) {
  12713. + cris_free_io_interface(if_usb_2);
  12714. + }
  12715. +
  12716. + devdrv_dbg("Freed all claimed resources\n");
  12717. +
  12718. + return 0;
  12719. +}
  12720. +
  12721. +
  12722. +#ifdef CONFIG_PM
  12723. +
  12724. +static int devdrv_hcd_suspend(struct device *dev, u32 state, u32 level)
  12725. +{
  12726. + return 0; /* no-op for now */
  12727. +}
  12728. +
  12729. +static int devdrv_hcd_resume(struct device *dev, u32 level)
  12730. +{
  12731. + return 0; /* no-op for now */
  12732. +}
  12733. +
  12734. +#endif /* CONFIG_PM */
  12735. +
  12736. +
  12737. +
  12738. +/*************************************************************/
  12739. +/*************************************************************/
  12740. +/* Module block */
  12741. +/*************************************************************/
  12742. +/*************************************************************/
  12743. +
  12744. +/* register driver */
  12745. +static int __init module_hcd_init(void)
  12746. +{
  12747. +
  12748. + if (usb_disabled())
  12749. + return -ENODEV;
  12750. +
  12751. + /* Here we select the enabled ports according to the defines created from
  12752. + menuconfig */
  12753. +#ifndef CONFIG_ETRAX_USB_HOST_PORT1
  12754. + ports &= ~(1<<0);
  12755. +#endif
  12756. +#ifndef CONFIG_ETRAX_USB_HOST_PORT2
  12757. + ports &= ~(1<<1);
  12758. +#endif
  12759. +
  12760. + printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
  12761. +
  12762. + devdrv_hc_platform_device =
  12763. + platform_device_register_simple((char *) hc_name, 0, NULL, 0);
  12764. +
  12765. + if (IS_ERR(devdrv_hc_platform_device))
  12766. + return PTR_ERR(devdrv_hc_platform_device);
  12767. + return driver_register(&devdrv_hc_device_driver);
  12768. + /*
  12769. + * Note that we do not set the DMA mask for the device,
  12770. + * i.e. we pretend that we will use PIO, since no specific
  12771. + * allocation routines are needed for DMA buffers. This will
  12772. + * cause the HCD buffer allocation routines to fall back to
  12773. + * kmalloc().
  12774. + */
  12775. +}
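The closing comment explains that no DMA mask is set, so the generic HCD buffer allocators fall back to kmalloc(). For comparison, a platform driver that did want the core to use coherent DMA buffers would declare its addressing capability roughly as below (illustrative only; this driver deliberately skips it):

#include <linux/dma-mapping.h>

/* Illustrative only: advertising 32-bit DMA capability so that the HCD
   buffer allocation helpers use dma_pool/coherent memory instead of
   falling back to kmalloc(). */
static int example_enable_dma_buffers(struct device *dev)
{
	int ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}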
  12776. +
  12777. +/* unregister driver */
  12778. +static void __exit module_hcd_exit(void)
  12779. +{
  12780. + driver_unregister(&devdrv_hc_device_driver);
  12781. +}
  12782. +
  12783. +
  12784. +/* Module hooks */
  12785. +module_init(module_hcd_init);
  12786. +module_exit(module_hcd_exit);
  12787. diff -Nur linux-2.6.39.orig/drivers/usb/host/hc-crisv10.h linux-2.6.39/drivers/usb/host/hc-crisv10.h
  12788. --- linux-2.6.39.orig/drivers/usb/host/hc-crisv10.h 1970-01-01 01:00:00.000000000 +0100
  12789. +++ linux-2.6.39/drivers/usb/host/hc-crisv10.h 2011-07-28 16:16:37.757174124 +0200
  12790. @@ -0,0 +1,331 @@
  12791. +#ifndef __LINUX_ETRAX_USB_H
  12792. +#define __LINUX_ETRAX_USB_H
  12793. +
  12794. +#include <linux/types.h>
  12795. +#include <linux/list.h>
  12796. +
  12797. +struct USB_IN_Desc {
  12798. + volatile __u16 sw_len;
  12799. + volatile __u16 command;
  12800. + volatile unsigned long next;
  12801. + volatile unsigned long buf;
  12802. + volatile __u16 hw_len;
  12803. + volatile __u16 status;
  12804. +};
  12805. +
  12806. +struct USB_SB_Desc {
  12807. + volatile __u16 sw_len;
  12808. + volatile __u16 command;
  12809. + volatile unsigned long next;
  12810. + volatile unsigned long buf;
  12811. +};
  12812. +
  12813. +struct USB_EP_Desc {
  12814. + volatile __u16 hw_len;
  12815. + volatile __u16 command;
  12816. + volatile unsigned long sub;
  12817. + volatile unsigned long next;
  12818. +};
  12819. +
  12820. +
  12821. +/* Root Hub port status struct */
  12822. +struct crisv10_rh {
  12823. + volatile __u16 wPortChange[2];
  12824. + volatile __u16 wPortStatusPrev[2];
  12825. +};
  12826. +
  12827. +/* HCD description */
  12828. +struct crisv10_hcd {
  12829. + spinlock_t lock;
  12830. + __u8 num_ports;
  12831. + __u8 running;
  12832. +};
  12833. +
  12834. +
  12835. +/* Endpoint HC private data description */
  12836. +struct crisv10_ep_priv {
  12837. + int epid;
  12838. +};
  12839. +
  12840. +/* Additional software state info for a USB Controller epid */
  12841. +struct etrax_epid {
  12842. + __u8 inuse; /* !0 = setup in Etrax and used for an endpoint */
  12843. + __u8 disabled; /* !0 = Temporarily disabled to avoid resubmission */
  12844. + __u8 type; /* Setup as: PIPE_BULK, PIPE_CONTROL ... */
  12845. + __u8 out_traffic; /* !0 = This epid is for out traffic */
  12846. +};
  12847. +
  12848. +/* Struct to hold information about a URB completion scheduled for later */
  12849. +struct urb_later_data {
  12850. +// struct work_struct ws;
  12851. + struct delayed_work ws;
  12852. + struct usb_hcd *hcd;
  12853. + struct urb *urb;
  12854. + int urb_num;
  12855. + int status;
  12856. +};
  12857. +
  12858. +
  12859. +typedef enum {
  12860. + STARTED,
  12861. + NOT_STARTED,
  12862. + UNLINK,
  12863. +} crisv10_urb_state_t;
  12864. +
  12865. +
  12866. +struct crisv10_urb_priv {
  12867. + /* Sequence number for this URB. Every newly submitted URB gets this from
  12868. + an incrementing counter. Used when a URB is scheduled for later completion,
  12869. + to be sure that the intended URB hasn't already been completed (device
  12870. + drivers have a tendency to reuse URBs once they are completed, so old ones
  12871. + cannot be singled out based on the URB pointer alone). */
  12872. + __u32 urb_num;
  12873. +
  12874. + /* The first_sb field is used for freeing all SB descriptors belonging
  12875. + to an urb. The corresponding ep descriptor's sub pointer cannot be
  12876. + used for this since the DMA advances the sub pointer as it processes
  12877. + the sb list. */
  12878. + struct USB_SB_Desc *first_sb;
  12879. +
  12880. + /* The last_sb field refers to the last SB descriptor that belongs to
  12881. + this urb. This is important to know so we can free the SB descriptors
  12882. + that range between first_sb and last_sb. */
  12883. + struct USB_SB_Desc *last_sb;
  12884. +
  12885. + /* The rx_offset field is used in ctrl and bulk traffic to keep track
  12886. + of the offset in the urb's transfer_buffer where incoming data should be
  12887. + copied to. */
  12888. + __u32 rx_offset;
  12889. +
  12890. + /* Counter used in isochronous transfers to keep track of the
  12891. + number of packets received/transmitted. */
  12892. + __u32 isoc_packet_counter;
  12893. +
  12894. + /* Flag that marks if this Isoc Out URB has finished its transfer. Used
  12895. + because several URBs can be finished before the list is processed */
  12896. + __u8 isoc_out_done;
  12897. +
  12898. + /* This field is used to pass information about the urb's current state
  12899. + between the various interrupt handlers (thus marked volatile). */
  12900. + volatile crisv10_urb_state_t urb_state;
  12901. +
  12902. + /* In Ctrl transfers consist of (at least) 3 packets: SETUP, IN and ZOUT.
  12903. + When DMA8 sub-channel 2 has processed the SB list for this sequence we
  12904. + get an interrupt. We also get an interrupt for the In transfer, and which
  12905. + of these interrupts comes first depends on data size and device.
  12906. + To be sure that we have got both interrupts before we complete the URB,
  12907. + we keep these two flags that show which part has completed.
  12908. + When we get one of the interrupts we check whether the other has already
  12909. + occurred; if so it is safe to complete the URB, otherwise we set the
  12910. + appropriate flag and do the completion when we get the other interrupt. */
  12911. + volatile unsigned char ctrl_zout_done;
  12912. + volatile unsigned char ctrl_rx_done;
  12913. +
  12914. + /* Connection between the submitted urb and ETRAX epid number */
  12915. + __u8 epid;
  12916. +
  12917. + /* The rx_data_list field is used for periodic traffic, to hold
  12918. + received data for later processing in the complete_urb functions,
  12919. + where the data is copied to the urb's transfer_buffer. Basically, we
  12920. + use this intermediate storage because we don't know when it's safe to
  12921. + reuse the transfer_buffer (FIXME?). */
  12922. + struct list_head rx_data_list;
  12923. +
  12924. +
  12925. + /* The interval time rounded up to the closest 2^N */
  12926. + int interval;
  12927. +
  12928. + /* Pool of EP descriptors needed if it's an INTR transfer.
  12929. + The number of EPs in the pool corresponds to how many INTR EPs should
  12930. + be inserted into TxIntrEPList (max 128, defined by MAX_INTR_INTERVAL) */
  12931. + struct USB_EP_Desc* intr_ep_pool[128];
  12932. +
  12933. + /* The number of EPs allocated for this INTR URB */
  12934. + int intr_ep_pool_length;
  12935. +
  12936. + /* Pointer to info struct if URB is scheduled to be finished later */
  12937. + struct urb_later_data* later_data;
  12938. +};
  12939. +
  12940. +
  12941. +/* This struct is for passing data from the top half to the bottom half irq
  12942. + handlers */
  12943. +struct crisv10_irq_reg {
  12944. + struct usb_hcd* hcd;
  12945. + __u32 r_usb_epid_attn;
  12946. + __u8 r_usb_status;
  12947. + __u16 r_usb_rh_port_status_1;
  12948. + __u16 r_usb_rh_port_status_2;
  12949. + __u32 r_usb_irq_mask_read;
  12950. + __u32 r_usb_fm_number;
  12951. + struct work_struct usb_bh;
  12952. +};
  12953. +
  12954. +
  12955. +/* This struct is for passing data from the isoc top half to the isoc bottom
  12956. + half. */
  12957. +struct crisv10_isoc_complete_data {
  12958. + struct usb_hcd *hcd;
  12959. + struct urb *urb;
  12960. + struct work_struct usb_bh;
  12961. +};
  12962. +
  12963. +/* Entry item for URB lists for each endpoint */
  12964. +typedef struct urb_entry
  12965. +{
  12966. + struct urb *urb;
  12967. + struct list_head list;
  12968. +} urb_entry_t;
  12969. +
  12970. +/* ---------------------------------------------------------------------------
  12971. + Virtual Root HUB
  12972. + ------------------------------------------------------------------------- */
  12973. +/* destination of request */
  12974. +#define RH_INTERFACE 0x01
  12975. +#define RH_ENDPOINT 0x02
  12976. +#define RH_OTHER 0x03
  12977. +
  12978. +#define RH_CLASS 0x20
  12979. +#define RH_VENDOR 0x40
  12980. +
  12981. +/* Requests: bRequest << 8 | bmRequestType */
  12982. +#define RH_GET_STATUS 0x0080
  12983. +#define RH_CLEAR_FEATURE 0x0100
  12984. +#define RH_SET_FEATURE 0x0300
  12985. +#define RH_SET_ADDRESS 0x0500
  12986. +#define RH_GET_DESCRIPTOR 0x0680
  12987. +#define RH_SET_DESCRIPTOR 0x0700
  12988. +#define RH_GET_CONFIGURATION 0x0880
  12989. +#define RH_SET_CONFIGURATION 0x0900
  12990. +#define RH_GET_STATE 0x0280
  12991. +#define RH_GET_INTERFACE 0x0A80
  12992. +#define RH_SET_INTERFACE 0x0B00
  12993. +#define RH_SYNC_FRAME 0x0C80
  12994. +/* Our Vendor Specific Request */
  12995. +#define RH_SET_EP 0x2000
  12996. +
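As a worked example of the "bRequest << 8 | bmRequestType" encoding above, RH_GET_DESCRIPTOR packs bRequest GET_DESCRIPTOR (0x06) into the high byte and the device-to-host bmRequestType (0x80) into the low byte. The RH_REQ() helper below is only an illustration built from the standard ch9 constants, not part of the header:

#include <linux/usb/ch9.h>

/* Illustrative helper, not part of the header. */
#define RH_REQ(bRequest, bmRequestType)  (((bRequest) << 8) | (bmRequestType))

/* RH_GET_DESCRIPTOR: (0x06 << 8) | 0x80 == 0x0680 */
static const unsigned int rh_get_descriptor_example =
	RH_REQ(USB_REQ_GET_DESCRIPTOR, USB_DIR_IN);
/* RH_SET_ADDRESS: (0x05 << 8) | 0x00 == 0x0500 */
static const unsigned int rh_set_address_example =
	RH_REQ(USB_REQ_SET_ADDRESS, 0x00);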
  12997. +
  12998. +/* Hub port features */
  12999. +#define RH_PORT_CONNECTION 0x00
  13000. +#define RH_PORT_ENABLE 0x01
  13001. +#define RH_PORT_SUSPEND 0x02
  13002. +#define RH_PORT_OVER_CURRENT 0x03
  13003. +#define RH_PORT_RESET 0x04
  13004. +#define RH_PORT_POWER 0x08
  13005. +#define RH_PORT_LOW_SPEED 0x09
  13006. +#define RH_C_PORT_CONNECTION 0x10
  13007. +#define RH_C_PORT_ENABLE 0x11
  13008. +#define RH_C_PORT_SUSPEND 0x12
  13009. +#define RH_C_PORT_OVER_CURRENT 0x13
  13010. +#define RH_C_PORT_RESET 0x14
  13011. +
  13012. +/* Hub features */
  13013. +#define RH_C_HUB_LOCAL_POWER 0x00
  13014. +#define RH_C_HUB_OVER_CURRENT 0x01
  13015. +
  13016. +#define RH_DEVICE_REMOTE_WAKEUP 0x00
  13017. +#define RH_ENDPOINT_STALL 0x01
  13018. +
  13019. +/* Our Vendor Specific feature */
  13020. +#define RH_REMOVE_EP 0x00
  13021. +
  13022. +
  13023. +#define RH_ACK 0x01
  13024. +#define RH_REQ_ERR -1
  13025. +#define RH_NACK 0x00
  13026. +
  13027. +/* Field definitions for the DMA descriptor command and status words. */
  13028. +
  13029. +#define USB_IN_command__eol__BITNR 0 /* command macros */
  13030. +#define USB_IN_command__eol__WIDTH 1
  13031. +#define USB_IN_command__eol__no 0
  13032. +#define USB_IN_command__eol__yes 1
  13033. +
  13034. +#define USB_IN_command__intr__BITNR 3
  13035. +#define USB_IN_command__intr__WIDTH 1
  13036. +#define USB_IN_command__intr__no 0
  13037. +#define USB_IN_command__intr__yes 1
  13038. +
  13039. +#define USB_IN_status__eop__BITNR 1 /* status macros. */
  13040. +#define USB_IN_status__eop__WIDTH 1
  13041. +#define USB_IN_status__eop__no 0
  13042. +#define USB_IN_status__eop__yes 1
  13043. +
  13044. +#define USB_IN_status__eot__BITNR 5
  13045. +#define USB_IN_status__eot__WIDTH 1
  13046. +#define USB_IN_status__eot__no 0
  13047. +#define USB_IN_status__eot__yes 1
  13048. +
  13049. +#define USB_IN_status__error__BITNR 6
  13050. +#define USB_IN_status__error__WIDTH 1
  13051. +#define USB_IN_status__error__no 0
  13052. +#define USB_IN_status__error__yes 1
  13053. +
  13054. +#define USB_IN_status__nodata__BITNR 7
  13055. +#define USB_IN_status__nodata__WIDTH 1
  13056. +#define USB_IN_status__nodata__no 0
  13057. +#define USB_IN_status__nodata__yes 1
  13058. +
  13059. +#define USB_IN_status__epid__BITNR 8
  13060. +#define USB_IN_status__epid__WIDTH 5
  13061. +
  13062. +#define USB_EP_command__eol__BITNR 0
  13063. +#define USB_EP_command__eol__WIDTH 1
  13064. +#define USB_EP_command__eol__no 0
  13065. +#define USB_EP_command__eol__yes 1
  13066. +
  13067. +#define USB_EP_command__eof__BITNR 1
  13068. +#define USB_EP_command__eof__WIDTH 1
  13069. +#define USB_EP_command__eof__no 0
  13070. +#define USB_EP_command__eof__yes 1
  13071. +
  13072. +#define USB_EP_command__intr__BITNR 3
  13073. +#define USB_EP_command__intr__WIDTH 1
  13074. +#define USB_EP_command__intr__no 0
  13075. +#define USB_EP_command__intr__yes 1
  13076. +
  13077. +#define USB_EP_command__enable__BITNR 4
  13078. +#define USB_EP_command__enable__WIDTH 1
  13079. +#define USB_EP_command__enable__no 0
  13080. +#define USB_EP_command__enable__yes 1
  13081. +
  13082. +#define USB_EP_command__hw_valid__BITNR 5
  13083. +#define USB_EP_command__hw_valid__WIDTH 1
  13084. +#define USB_EP_command__hw_valid__no 0
  13085. +#define USB_EP_command__hw_valid__yes 1
  13086. +
  13087. +#define USB_EP_command__epid__BITNR 8
  13088. +#define USB_EP_command__epid__WIDTH 5
  13089. +
  13090. +#define USB_SB_command__eol__BITNR 0 /* command macros. */
  13091. +#define USB_SB_command__eol__WIDTH 1
  13092. +#define USB_SB_command__eol__no 0
  13093. +#define USB_SB_command__eol__yes 1
  13094. +
  13095. +#define USB_SB_command__eot__BITNR 1
  13096. +#define USB_SB_command__eot__WIDTH 1
  13097. +#define USB_SB_command__eot__no 0
  13098. +#define USB_SB_command__eot__yes 1
  13099. +
  13100. +#define USB_SB_command__intr__BITNR 3
  13101. +#define USB_SB_command__intr__WIDTH 1
  13102. +#define USB_SB_command__intr__no 0
  13103. +#define USB_SB_command__intr__yes 1
  13104. +
  13105. +#define USB_SB_command__tt__BITNR 4
  13106. +#define USB_SB_command__tt__WIDTH 2
  13107. +#define USB_SB_command__tt__zout 0
  13108. +#define USB_SB_command__tt__in 1
  13109. +#define USB_SB_command__tt__out 2
  13110. +#define USB_SB_command__tt__setup 3
  13111. +
  13112. +
  13113. +#define USB_SB_command__rem__BITNR 8
  13114. +#define USB_SB_command__rem__WIDTH 6
  13115. +
  13116. +#define USB_SB_command__full__BITNR 6
  13117. +#define USB_SB_command__full__WIDTH 1
  13118. +#define USB_SB_command__full__no 0
  13119. +#define USB_SB_command__full__yes 1
  13120. +
  13121. +#endif
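The __BITNR/__WIDTH/value tables above are what the CRIS IO_MASK()/IO_STATE()/IO_EXTRACT() macros (defined in the arch headers, not in this file) paste together to build field masks. The EX_* macros below are only a rough behavioral illustration of that token-pasting scheme, not the arch's actual definitions:

/* Rough illustration of how the field tables are consumed. EX_* are
   invented names; the real macros are the arch's IO_MASK/IO_STATE/IO_EXTRACT. */
#define EX_MASK(reg, field) \
	(((1u << reg##__##field##__WIDTH) - 1) << reg##__##field##__BITNR)
#define EX_STATE(reg, field, val) \
	((unsigned)reg##__##field##__##val << reg##__##field##__BITNR)
#define EX_EXTRACT(reg, field, word) \
	(((word) >> reg##__##field##__BITNR) & ((1u << reg##__##field##__WIDTH) - 1))

/* Examples with the tables above:
 *   EX_MASK(USB_EP_command, enable)        == 1 << 4
 *   EX_STATE(USB_EP_command, enable, yes)  == 1 << 4
 *   EX_EXTRACT(USB_IN_status, epid, s)     == (s >> 8) & 0x1f              */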
  13122. diff -Nur linux-2.6.39.orig/drivers/usb/host/Makefile linux-2.6.39/drivers/usb/host/Makefile
  13123. --- linux-2.6.39.orig/drivers/usb/host/Makefile 2011-05-19 06:06:34.000000000 +0200
  13124. +++ linux-2.6.39/drivers/usb/host/Makefile 2011-07-28 16:16:37.863421513 +0200
  13125. @@ -32,6 +32,7 @@
  13126. obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
  13127. obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
  13128. obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
  13129. +obj-$(CONFIG_ETRAX_USB_HOST) += hc-crisv10.o
  13130. obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
  13131. obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
  13132. obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
  13133. diff -Nur linux-2.6.39.orig/drivers/usb/Makefile linux-2.6.39/drivers/usb/Makefile
  13134. --- linux-2.6.39.orig/drivers/usb/Makefile 2011-05-19 06:06:34.000000000 +0200
  13135. +++ linux-2.6.39/drivers/usb/Makefile 2011-07-28 16:16:37.993659487 +0200
  13136. @@ -21,6 +21,7 @@
  13137. obj-$(CONFIG_USB_R8A66597_HCD) += host/
  13138. obj-$(CONFIG_USB_HWA_HCD) += host/
  13139. obj-$(CONFIG_USB_ISP1760_HCD) += host/
  13140. +obj-$(CONFIG_ETRAX_USB_HOST) += host/
  13141. obj-$(CONFIG_USB_IMX21_HCD) += host/
  13142. obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
  13143. diff -Nur linux-2.6.39.orig/lib/klist.c linux-2.6.39/lib/klist.c
  13144. --- linux-2.6.39.orig/lib/klist.c 2011-05-19 06:06:34.000000000 +0200
  13145. +++ linux-2.6.39/lib/klist.c 2011-07-28 16:16:38.103425277 +0200
  13146. @@ -60,7 +60,7 @@
  13147. {
  13148. knode->n_klist = klist;
  13149. /* no knode deserves to start its life dead */
  13150. - WARN_ON(knode_dead(knode));
  13151. + //WARN_ON(knode_dead(knode));
  13152. }
  13153. static void knode_kill(struct klist_node *knode)