cris.patch 373 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
1822118231182411825118261182711828118291183011831118321183311834118351183611837118381183911840118411184211843118441184511846118471184811849118501185111852118531185411855118561185711858118591186011861118621186311864118651186611867118681186911870118711187211873118741187511876118771187811879118801188111882118831188411885118861188711888118891189011891118921189311894118951189611897118981189911900119011190211903119041190511906119071190811909119101191111912119131191411915119161191711918119191192011921119221192311924119251192611927119281192911930119311193211933119341193511936119371193811939119401194111942119431194411945119461194711948119491195011951119521195311954119551195611957119581195911960119611196211963119641196511966119671196811969119701197111972119731197411975119761197711978119791198011981119821198311984119851198611987119881198911990119911199211993119941199511996119971199811999120001200112002120031200412005120061200712008120091201012011120121201312014120151201612017120181201912020120211202212023120241202512026120271202812029120301203112032120331203412035120361203712038120391204012041120421204312044120451204612047120481204912050120511205212053120541205512056120571205812059120601206112062120631206412065120661206712068120691207012071120721207312074120751207612077120781207912080120811208212083120841208512086120871208812089120901209112092120931209412095120961209712098120991210012101121021210312104121051210612107121081210912110121111211212113121141211512116121171211812119121201212112122121231212412125121261212712128121291213012131121321213312134121351213612137121381213912140121411214212143121441214512146121471214812149121501215112152121531215412155121561215712158121591216012161121621216312164121651216612167121681216912170121711217212173121741217512176
  1. diff -Nur linux-2.6.32.orig/arch/cris/arch-v10/drivers/axisflashmap.c linux-2.6.32/arch/cris/arch-v10/drivers/axisflashmap.c
  2. --- linux-2.6.32.orig/arch/cris/arch-v10/drivers/axisflashmap.c 2009-12-03 04:51:21.000000000 +0100
  3. +++ linux-2.6.32/arch/cris/arch-v10/drivers/axisflashmap.c 2010-01-10 14:34:37.376309632 +0100
  4. @@ -113,7 +113,7 @@
  5. /* If no partition-table was found, we use this default-set. */
  6. #define MAX_PARTITIONS 7
  7. -#define NUM_DEFAULT_PARTITIONS 3
  8. +#define NUM_DEFAULT_PARTITIONS 4
  9. /*
  10. * Default flash size is 2MB. CONFIG_ETRAX_PTABLE_SECTOR is most likely the
  11. @@ -122,19 +122,24 @@
  12. */
  13. static struct mtd_partition axis_default_partitions[NUM_DEFAULT_PARTITIONS] = {
  14. {
  15. - .name = "boot firmware",
  16. - .size = CONFIG_ETRAX_PTABLE_SECTOR,
  17. + .name = "kernel",
  18. + .size = 0x00,
  19. .offset = 0
  20. },
  21. {
  22. - .name = "kernel",
  23. - .size = 0x200000 - (6 * CONFIG_ETRAX_PTABLE_SECTOR),
  24. - .offset = CONFIG_ETRAX_PTABLE_SECTOR
  25. + .name = "rootfs",
  26. + .size = 0x200000,
  27. + .offset = 0x200000
  28. },
  29. {
  30. - .name = "filesystem",
  31. - .size = 5 * CONFIG_ETRAX_PTABLE_SECTOR,
  32. - .offset = 0x200000 - (5 * CONFIG_ETRAX_PTABLE_SECTOR)
  33. + .name = "cfgfs",
  34. + .size = 0x20000,
  35. + .offset = CONFIG_ETRAX_MTD_SIZE - 0x20000
  36. + },
  37. + {
  38. + .name = "linux",
  39. + .size = CONFIG_ETRAX_MTD_SIZE - 0x20000,
  40. + .offset = 0
  41. }
  42. };
  43. @@ -281,6 +286,11 @@
  44. struct partitiontable_entry *ptable;
  45. int use_default_ptable = 1; /* Until proven otherwise. */
  46. const char pmsg[] = " /dev/flash%d at 0x%08x, size 0x%08x\n";
  47. + unsigned int kernel_part_size = 0;
  48. + unsigned char *flash_mem = (unsigned char*)(FLASH_CACHED_ADDR);
  49. + unsigned int flash_scan_count = 0;
  50. + const char *part_magic = "ACME_PART_MAGIC";
  51. + unsigned int magic_len = strlen(part_magic);
  52. if (!(mymtd = flash_probe())) {
  53. /* There's no reason to use this module if no flash chip can
  54. @@ -292,6 +302,31 @@
  55. mymtd->name, mymtd->size);
  56. axisflash_mtd = mymtd;
  57. }
  58. + /* scan flash to find out where our partition starts */
  59. +
  60. + printk(KERN_INFO "Scanning flash for end of kernel magic\n");
  61. + for(flash_scan_count = 0; flash_scan_count < 100000; flash_scan_count++){
  62. + if(strncmp(&flash_mem[flash_scan_count], part_magic, magic_len - 1) == 0)
  63. + {
  64. + kernel_part_size = flash_mem[flash_scan_count + magic_len ];
  65. + kernel_part_size <<= 8;
  66. + kernel_part_size += flash_mem[flash_scan_count + magic_len + 2];
  67. + kernel_part_size <<= 8;
  68. + kernel_part_size += flash_mem[flash_scan_count + magic_len + 1];
  69. + kernel_part_size <<= 8;
  70. + kernel_part_size += flash_mem[flash_scan_count + magic_len + 3];
  71. + printk(KERN_INFO "Kernel ends at 0x%.08X\n", kernel_part_size);
  72. + flash_scan_count = 1100000;
  73. + }
  74. + }
  75. +
  76. +
  77. + if(kernel_part_size){
  78. + kernel_part_size = (kernel_part_size & 0xffff0000);
  79. + axis_default_partitions[0].size = kernel_part_size;
  80. + axis_default_partitions[1].size = mymtd->size - axis_default_partitions[0].size - axis_default_partitions[2].size;
  81. + axis_default_partitions[1].offset = axis_default_partitions[0].size;
  82. + }
  83. if (mymtd) {
  84. mymtd->owner = THIS_MODULE;
  85. @@ -360,21 +395,6 @@
  86. use_default_ptable = !ptable_ok;
  87. }
  88. - if (romfs_in_flash) {
  89. - /* Add an overlapping device for the root partition (romfs). */
  90. -
  91. - axis_partitions[pidx].name = "romfs";
  92. - axis_partitions[pidx].size = romfs_length;
  93. - axis_partitions[pidx].offset = romfs_start - FLASH_CACHED_ADDR;
  94. - axis_partitions[pidx].mask_flags |= MTD_WRITEABLE;
  95. -
  96. - printk(KERN_INFO
  97. - " Adding readonly flash partition for romfs image:\n");
  98. - printk(pmsg, pidx, axis_partitions[pidx].offset,
  99. - axis_partitions[pidx].size);
  100. - pidx++;
  101. - }
  102. -
  103. #ifdef CONFIG_ETRAX_AXISFLASHMAP_MTD0WHOLE
  104. if (mymtd) {
  105. main_partition.size = mymtd->size;
  106. @@ -397,36 +417,6 @@
  107. if (err)
  108. panic("axisflashmap could not add MTD partitions!\n");
  109. }
  110. -
  111. - if (!romfs_in_flash) {
  112. - /* Create an RAM device for the root partition (romfs). */
  113. -
  114. -#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
  115. - /* No use trying to boot this kernel from RAM. Panic! */
  116. - printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
  117. - "device due to kernel (mis)configuration!\n");
  118. - panic("This kernel cannot boot from RAM!\n");
  119. -#else
  120. - struct mtd_info *mtd_ram;
  121. -
  122. - mtd_ram = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
  123. - if (!mtd_ram)
  124. - panic("axisflashmap couldn't allocate memory for "
  125. - "mtd_info!\n");
  126. -
  127. - printk(KERN_INFO " Adding RAM partition for romfs image:\n");
  128. - printk(pmsg, pidx, (unsigned)romfs_start,
  129. - (unsigned)romfs_length);
  130. -
  131. - err = mtdram_init_device(mtd_ram,
  132. - (void *)romfs_start,
  133. - romfs_length,
  134. - "romfs");
  135. - if (err)
  136. - panic("axisflashmap could not initialize MTD RAM "
  137. - "device!\n");
  138. -#endif
  139. - }
  140. return err;
  141. }
  142. diff -Nur linux-2.6.32.orig/arch/cris/arch-v10/drivers/ds1302.c linux-2.6.32/arch/cris/arch-v10/drivers/ds1302.c
  143. --- linux-2.6.32.orig/arch/cris/arch-v10/drivers/ds1302.c 2009-12-03 04:51:21.000000000 +0100
  144. +++ linux-2.6.32/arch/cris/arch-v10/drivers/ds1302.c 2010-01-10 13:41:59.256309588 +0100
  145. @@ -21,6 +21,7 @@
  146. #include <linux/delay.h>
  147. #include <linux/bcd.h>
  148. #include <linux/capability.h>
  149. +#include <linux/device.h>
  150. #include <asm/uaccess.h>
  151. #include <asm/system.h>
  152. @@ -489,6 +490,10 @@
  153. return 0;
  154. }
  155. +#ifdef CONFIG_SYSFS
  156. +static struct class *rtc_class;
  157. +#endif
  158. +
  159. static int __init ds1302_register(void)
  160. {
  161. ds1302_init();
  162. @@ -497,6 +502,12 @@
  163. ds1302_name, RTC_MAJOR_NR);
  164. return -1;
  165. }
  166. + #ifdef CONFIG_SYSFS
  167. + rtc_class = class_create(THIS_MODULE, "rtc");
  168. + class_device_create(rtc_class, NULL, MKDEV(RTC_MAJOR_NR, 0),
  169. + NULL, "rtc");
  170. + #endif
  171. +
  172. return 0;
  173. }
  174. diff -Nur linux-2.6.32.orig/arch/cris/arch-v10/drivers/gpio.c linux-2.6.32/arch/cris/arch-v10/drivers/gpio.c
  175. --- linux-2.6.32.orig/arch/cris/arch-v10/drivers/gpio.c 2009-12-03 04:51:21.000000000 +0100
  176. +++ linux-2.6.32/arch/cris/arch-v10/drivers/gpio.c 2010-01-10 13:41:59.256309588 +0100
  177. @@ -21,6 +21,7 @@
  178. #include <linux/poll.h>
  179. #include <linux/init.h>
  180. #include <linux/interrupt.h>
  181. +#include <linux/device.h>
  182. #include <asm/etraxgpio.h>
  183. #include <arch/svinto.h>
  184. @@ -771,6 +772,10 @@
  185. /* main driver initialization routine, called from mem.c */
  186. +#ifdef CONFIG_SYSFS
  187. +static struct class *gpio_class;
  188. +#endif
  189. +
  190. static int __init gpio_init(void)
  191. {
  192. int res;
  193. @@ -784,6 +789,13 @@
  194. return res;
  195. }
  196. +#ifdef CONFIG_SYSFS
  197. + gpio_class = class_create(THIS_MODULE, "gpio");
  198. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 0), NULL, "gpioa");
  199. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 1), NULL, "gpiob");
  200. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 2), NULL, "leds");
  201. + device_create(gpio_class, NULL, MKDEV(GPIO_MAJOR, 3), NULL, "gpiog");
  202. +#endif
  203. /* Clear all leds */
  204. #if defined (CONFIG_ETRAX_CSP0_LEDS) || defined (CONFIG_ETRAX_PA_LEDS) || defined (CONFIG_ETRAX_PB_LEDS)
  205. CRIS_LED_NETWORK_SET(0);
  206. diff -Nur linux-2.6.32.orig/arch/cris/arch-v10/lib/hw_settings.S linux-2.6.32/arch/cris/arch-v10/lib/hw_settings.S
  207. --- linux-2.6.32.orig/arch/cris/arch-v10/lib/hw_settings.S 2009-12-03 04:51:21.000000000 +0100
  208. +++ linux-2.6.32/arch/cris/arch-v10/lib/hw_settings.S 2010-01-10 13:41:59.256309588 +0100
  209. @@ -60,3 +60,5 @@
  210. .dword R_PORT_PB_SET
  211. .dword PB_SET_VALUE
  212. .dword 0 ; No more register values
  213. + .ascii "ACME_PART_MAGIC"
  214. + .dword 0xdeadc0de
  215. diff -Nur linux-2.6.32.orig/arch/cris/arch-v10/mm/init.c linux-2.6.32/arch/cris/arch-v10/mm/init.c
  216. --- linux-2.6.32.orig/arch/cris/arch-v10/mm/init.c 2009-12-03 04:51:21.000000000 +0100
  217. +++ linux-2.6.32/arch/cris/arch-v10/mm/init.c 2010-01-10 13:41:59.256309588 +0100
  218. @@ -184,6 +184,9 @@
  219. free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
  220. }
  221. +void free_initrd_mem(unsigned long start, unsigned long end)
  222. +{
  223. +}
  224. /* Initialize remaps of some I/O-ports. It is important that this
  225. * is called before any driver is initialized.
  226. diff -Nur linux-2.6.32.orig/arch/cris/boot/compressed/Makefile linux-2.6.32/arch/cris/boot/compressed/Makefile
  227. --- linux-2.6.32.orig/arch/cris/boot/compressed/Makefile 2009-12-03 04:51:21.000000000 +0100
  228. +++ linux-2.6.32/arch/cris/boot/compressed/Makefile 2010-01-10 13:41:59.256309588 +0100
  229. @@ -18,7 +18,7 @@
  230. OBJECTS-$(CONFIG_ETRAX_ARCH_V32) = $(obj)/head_v32.o
  231. OBJECTS-$(CONFIG_ETRAX_ARCH_V10) = $(obj)/head_v10.o
  232. OBJECTS= $(OBJECTS-y) $(obj)/misc.o
  233. -OBJCOPYFLAGS = -O binary --remove-section=.bss
  234. +#OBJCOPYFLAGS = -O binary --remove-section=.bss
  235. quiet_cmd_image = BUILD $@
  236. cmd_image = cat $(obj)/decompress.bin $(obj)/piggy.gz > $@
  237. diff -Nur linux-2.6.32.orig/arch/cris/boot/compressed/misc.c linux-2.6.32/arch/cris/boot/compressed/misc.c
  238. --- linux-2.6.32.orig/arch/cris/boot/compressed/misc.c 2009-12-03 04:51:21.000000000 +0100
  239. +++ linux-2.6.32/arch/cris/boot/compressed/misc.c 2010-01-10 13:41:59.256309588 +0100
  240. @@ -106,7 +106,7 @@
  241. static void flush_window(void);
  242. static void error(char *m);
  243. -static void puts(const char *);
  244. +static void putstr(const char *);
  245. extern char *input_data; /* lives in head.S */
  246. @@ -139,7 +139,7 @@
  247. }
  248. #endif
  249. -static void puts(const char *s)
  250. +static void putstr(const char *s)
  251. {
  252. #ifndef CONFIG_ETRAX_DEBUG_PORT_NULL
  253. while (*s) {
  254. @@ -233,9 +233,9 @@
  255. static void error(char *x)
  256. {
  257. - puts("\n\n");
  258. - puts(x);
  259. - puts("\n\n -- System halted\n");
  260. + putstr("\n\n");
  261. + putstr(x);
  262. + putstr("\n\n -- System halted\n");
  263. while(1); /* Halt */
  264. }
  265. @@ -378,14 +378,14 @@
  266. __asm__ volatile ("move $vr,%0" : "=rm" (revision));
  267. if (revision < compile_rev) {
  268. #ifdef CONFIG_ETRAX_ARCH_V32
  269. - puts("You need an ETRAX FS to run Linux 2.6/crisv32\n");
  270. + putstr("You need an ETRAX FS to run Linux 2.6/crisv32\n");
  271. #else
  272. - puts("You need an ETRAX 100LX to run linux 2.6\n");
  273. + putstr("You need an ETRAX 100LX to run linux 2.6\n");
  274. #endif
  275. while(1);
  276. }
  277. - puts("Uncompressing Linux...\n");
  278. + putstr("Uncompressing Linux...\n");
  279. gunzip();
  280. - puts("Done. Now booting the kernel\n");
  281. + putstr("Done. Now booting the kernel\n");
  282. }
  283. diff -Nur linux-2.6.32.orig/arch/cris/boot/Makefile linux-2.6.32/arch/cris/boot/Makefile
  284. --- linux-2.6.32.orig/arch/cris/boot/Makefile 2009-12-03 04:51:21.000000000 +0100
  285. +++ linux-2.6.32/arch/cris/boot/Makefile 2010-01-10 13:41:59.256309588 +0100
  286. @@ -5,7 +5,7 @@
  287. objcopyflags-$(CONFIG_ETRAX_ARCH_V10) += -R .note -R .comment
  288. objcopyflags-$(CONFIG_ETRAX_ARCH_V32) += --remove-section=.bss
  289. -OBJCOPYFLAGS = -O binary $(objcopyflags-y)
  290. +#OBJCOPYFLAGS = -O binary $(objcopyflags-y)
  291. subdir- := compressed rescue
  292. @@ -17,7 +17,6 @@
  293. $(obj)/compressed/vmlinux: $(obj)/Image FORCE
  294. $(Q)$(MAKE) $(build)=$(obj)/compressed $@
  295. - $(Q)$(MAKE) $(build)=$(obj)/rescue $(obj)/rescue/rescue.bin
  296. $(obj)/zImage: $(obj)/compressed/vmlinux
  297. @cp $< $@
  298. diff -Nur linux-2.6.32.orig/arch/cris/Kconfig linux-2.6.32/arch/cris/Kconfig
  299. --- linux-2.6.32.orig/arch/cris/Kconfig 2009-12-03 04:51:21.000000000 +0100
  300. +++ linux-2.6.32/arch/cris/Kconfig 2010-01-10 13:41:59.256309588 +0100
  301. @@ -168,6 +168,12 @@
  302. help
  303. Size of DRAM (decimal in MB) typically 2, 8 or 16.
  304. +config ETRAX_MTD_SIZE
  305. + hex "MTD size (hex)"
  306. + default "0x00800000"
  307. + help
  308. + Size of MTD device typically 4 or 8 MB.
  309. +
  310. config ETRAX_VMEM_SIZE
  311. int "Video memory size (dec, in MB)"
  312. depends on ETRAX_ARCH_V32 && !ETRAXFS
  313. @@ -273,7 +279,7 @@
  314. select MTD_CFI_AMDSTD
  315. select MTD_JEDECPROBE if ETRAX_ARCH_V32
  316. select MTD_CHAR
  317. - select MTD_BLOCK
  318. + select MTD_BLOCK_RO
  319. select MTD_PARTITIONS
  320. select MTD_CONCAT
  321. select MTD_COMPLEX_MAPPINGS
  322. @@ -662,6 +668,11 @@
  323. source "drivers/ide/Kconfig"
  324. +# Mysteriously, part of this standard Linux driver was removed from the cris build! - info@crisos.org
  325. +source "drivers/scsi/Kconfig"
  326. +
  327. +source "drivers/media/Kconfig"
  328. +
  329. source "drivers/net/Kconfig"
  330. source "drivers/i2c/Kconfig"
  331. diff -Nur linux-2.6.32.orig/arch/cris/Makefile linux-2.6.32/arch/cris/Makefile
  332. --- linux-2.6.32.orig/arch/cris/Makefile 2009-12-03 04:51:21.000000000 +0100
  333. +++ linux-2.6.32/arch/cris/Makefile 2010-01-10 13:41:59.256309588 +0100
  334. @@ -40,10 +40,10 @@
  335. LD = $(CROSS_COMPILE)ld -mcrislinux
  336. -OBJCOPYFLAGS := -O binary -R .note -R .comment -S
  337. +OBJCOPYFLAGS := -O binary -R .bss -R .note -R .note.gnu.build-id -R .comment -S
  338. KBUILD_AFLAGS += -mlinux -march=$(arch-y) $(inc)
  339. -KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe $(inc)
  340. +KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe -fno-peephole2 $(inc)
  341. KBUILD_CPPFLAGS += $(inc)
  342. ifdef CONFIG_FRAME_POINTER
  343. diff -Nur linux-2.6.32.orig/drivers/net/cris/eth_v10.c linux-2.6.32/drivers/net/cris/eth_v10.c
  344. --- linux-2.6.32.orig/drivers/net/cris/eth_v10.c 2009-12-03 04:51:21.000000000 +0100
  345. +++ linux-2.6.32/drivers/net/cris/eth_v10.c 2010-01-10 13:41:59.256309588 +0100
  346. @@ -1725,7 +1725,7 @@
  347. static void
  348. e100_netpoll(struct net_device* netdev)
  349. {
  350. - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
  351. + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
  352. }
  353. #endif
  354. diff -Nur linux-2.6.32.orig/drivers/net/cris/eth_v10.c.orig linux-2.6.32/drivers/net/cris/eth_v10.c.orig
  355. --- linux-2.6.32.orig/drivers/net/cris/eth_v10.c.orig 1970-01-01 01:00:00.000000000 +0100
  356. +++ linux-2.6.32/drivers/net/cris/eth_v10.c.orig 2009-12-03 04:51:21.000000000 +0100
  357. @@ -0,0 +1,1760 @@
  358. +/*
  359. + * e100net.c: A network driver for the ETRAX 100LX network controller.
  360. + *
  361. + * Copyright (c) 1998-2002 Axis Communications AB.
  362. + *
  363. + * The outline of this driver comes from skeleton.c.
  364. + *
  365. + */
  366. +
  367. +
  368. +#include <linux/module.h>
  369. +
  370. +#include <linux/kernel.h>
  371. +#include <linux/delay.h>
  372. +#include <linux/types.h>
  373. +#include <linux/fcntl.h>
  374. +#include <linux/interrupt.h>
  375. +#include <linux/ptrace.h>
  376. +#include <linux/ioport.h>
  377. +#include <linux/in.h>
  378. +#include <linux/slab.h>
  379. +#include <linux/string.h>
  380. +#include <linux/spinlock.h>
  381. +#include <linux/errno.h>
  382. +#include <linux/init.h>
  383. +#include <linux/bitops.h>
  384. +
  385. +#include <linux/if.h>
  386. +#include <linux/mii.h>
  387. +#include <linux/netdevice.h>
  388. +#include <linux/etherdevice.h>
  389. +#include <linux/skbuff.h>
  390. +#include <linux/ethtool.h>
  391. +
  392. +#include <arch/svinto.h>/* DMA and register descriptions */
  393. +#include <asm/io.h> /* CRIS_LED_* I/O functions */
  394. +#include <asm/irq.h>
  395. +#include <asm/dma.h>
  396. +#include <asm/system.h>
  397. +#include <asm/ethernet.h>
  398. +#include <asm/cache.h>
  399. +#include <arch/io_interface_mux.h>
  400. +
  401. +//#define ETHDEBUG
  402. +#define D(x)
  403. +
  404. +/*
  405. + * The name of the card. Is used for messages and in the requests for
  406. + * io regions, irqs and dma channels
  407. + */
  408. +
  409. +static const char* cardname = "ETRAX 100LX built-in ethernet controller";
  410. +
  411. +/* A default ethernet address. Highlevel SW will set the real one later */
  412. +
  413. +static struct sockaddr default_mac = {
  414. + 0,
  415. + { 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
  416. +};
  417. +
  418. +/* Information that need to be kept for each board. */
  419. +struct net_local {
  420. + struct net_device_stats stats;
  421. + struct mii_if_info mii_if;
  422. +
  423. + /* Tx control lock. This protects the transmit buffer ring
  424. + * state along with the "tx full" state of the driver. This
  425. + * means all netif_queue flow control actions are protected
  426. + * by this lock as well.
  427. + */
  428. + spinlock_t lock;
  429. +
  430. + spinlock_t led_lock; /* Protect LED state */
  431. + spinlock_t transceiver_lock; /* Protect transceiver state. */
  432. +};
  433. +
  434. +typedef struct etrax_eth_descr
  435. +{
  436. + etrax_dma_descr descr;
  437. + struct sk_buff* skb;
  438. +} etrax_eth_descr;
  439. +
  440. +/* Some transceivers requires special handling */
  441. +struct transceiver_ops
  442. +{
  443. + unsigned int oui;
  444. + void (*check_speed)(struct net_device* dev);
  445. + void (*check_duplex)(struct net_device* dev);
  446. +};
  447. +
  448. +/* Duplex settings */
  449. +enum duplex
  450. +{
  451. + half,
  452. + full,
  453. + autoneg
  454. +};
  455. +
  456. +/* Dma descriptors etc. */
  457. +
  458. +#define MAX_MEDIA_DATA_SIZE 1522
  459. +
  460. +#define MIN_PACKET_LEN 46
  461. +#define ETHER_HEAD_LEN 14
  462. +
  463. +/*
  464. +** MDIO constants.
  465. +*/
  466. +#define MDIO_START 0x1
  467. +#define MDIO_READ 0x2
  468. +#define MDIO_WRITE 0x1
  469. +#define MDIO_PREAMBLE 0xfffffffful
  470. +
  471. +/* Broadcom specific */
  472. +#define MDIO_AUX_CTRL_STATUS_REG 0x18
  473. +#define MDIO_BC_FULL_DUPLEX_IND 0x1
  474. +#define MDIO_BC_SPEED 0x2
  475. +
  476. +/* TDK specific */
  477. +#define MDIO_TDK_DIAGNOSTIC_REG 18
  478. +#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
  479. +#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
  480. +
  481. +/*Intel LXT972A specific*/
  482. +#define MDIO_INT_STATUS_REG_2 0x0011
  483. +#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
  484. +#define MDIO_INT_SPEED (1 << 14)
  485. +
  486. +/* Network flash constants */
  487. +#define NET_FLASH_TIME (HZ/50) /* 20 ms */
  488. +#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
  489. +#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 s */
  490. +#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 s */
  491. +
  492. +#define NO_NETWORK_ACTIVITY 0
  493. +#define NETWORK_ACTIVITY 1
  494. +
  495. +#define NBR_OF_RX_DESC 32
  496. +#define NBR_OF_TX_DESC 16
  497. +
  498. +/* Large packets are sent directly to upper layers while small packets are */
  499. +/* copied (to reduce memory waste). The following constant decides the breakpoint */
  500. +#define RX_COPYBREAK 256
  501. +
  502. +/* Due to a chip bug we need to flush the cache when descriptors are returned */
  503. +/* to the DMA. To decrease performance impact we return descriptors in chunks. */
  504. +/* The following constant determines the number of descriptors to return. */
  505. +#define RX_QUEUE_THRESHOLD NBR_OF_RX_DESC/2
  506. +
  507. +#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
  508. +
  509. +/* Define some macros to access ETRAX 100 registers */
  510. +#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
  511. + IO_FIELD_(reg##_, field##_, val)
  512. +#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
  513. + IO_STATE_(reg##_, field##_, _##val)
  514. +
  515. +static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
  516. + to be processed */
  517. +static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
  518. +
  519. +static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
  520. +
  521. +static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */
  522. +static etrax_eth_descr* myLastTxDesc; /* End of send queue */
  523. +static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
  524. +static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
  525. +
  526. +static unsigned int network_rec_config_shadow = 0;
  527. +
  528. +static unsigned int network_tr_ctrl_shadow = 0;
  529. +
  530. +/* Network speed indication. */
  531. +static DEFINE_TIMER(speed_timer, NULL, 0, 0);
  532. +static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
  533. +static int current_speed; /* Speed read from transceiver */
  534. +static int current_speed_selection; /* Speed selected by user */
  535. +static unsigned long led_next_time;
  536. +static int led_active;
  537. +static int rx_queue_len;
  538. +
  539. +/* Duplex */
  540. +static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
  541. +static int full_duplex;
  542. +static enum duplex current_duplex;
  543. +
  544. +/* Index to functions, as function prototypes. */
  545. +
  546. +static int etrax_ethernet_init(void);
  547. +
  548. +static int e100_open(struct net_device *dev);
  549. +static int e100_set_mac_address(struct net_device *dev, void *addr);
  550. +static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
  551. +static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id);
  552. +static irqreturn_t e100nw_interrupt(int irq, void *dev_id);
  553. +static void e100_rx(struct net_device *dev);
  554. +static int e100_close(struct net_device *dev);
  555. +static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
  556. +static int e100_set_config(struct net_device* dev, struct ifmap* map);
  557. +static void e100_tx_timeout(struct net_device *dev);
  558. +static struct net_device_stats *e100_get_stats(struct net_device *dev);
  559. +static void set_multicast_list(struct net_device *dev);
  560. +static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
  561. +static void update_rx_stats(struct net_device_stats *);
  562. +static void update_tx_stats(struct net_device_stats *);
  563. +static int e100_probe_transceiver(struct net_device* dev);
  564. +
  565. +static void e100_check_speed(unsigned long priv);
  566. +static void e100_set_speed(struct net_device* dev, unsigned long speed);
  567. +static void e100_check_duplex(unsigned long priv);
  568. +static void e100_set_duplex(struct net_device* dev, enum duplex);
  569. +static void e100_negotiate(struct net_device* dev);
  570. +
  571. +static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
  572. +static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);
  573. +
  574. +static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
  575. +static void e100_send_mdio_bit(unsigned char bit);
  576. +static unsigned char e100_receive_mdio_bit(void);
  577. +static void e100_reset_transceiver(struct net_device* net);
  578. +
  579. +static void e100_clear_network_leds(unsigned long dummy);
  580. +static void e100_set_network_leds(int active);
  581. +
  582. +static const struct ethtool_ops e100_ethtool_ops;
  583. +#if defined(CONFIG_ETRAX_NO_PHY)
  584. +static void dummy_check_speed(struct net_device* dev);
  585. +static void dummy_check_duplex(struct net_device* dev);
  586. +#else
  587. +static void broadcom_check_speed(struct net_device* dev);
  588. +static void broadcom_check_duplex(struct net_device* dev);
  589. +static void tdk_check_speed(struct net_device* dev);
  590. +static void tdk_check_duplex(struct net_device* dev);
  591. +static void intel_check_speed(struct net_device* dev);
  592. +static void intel_check_duplex(struct net_device* dev);
  593. +static void generic_check_speed(struct net_device* dev);
  594. +static void generic_check_duplex(struct net_device* dev);
  595. +#endif
  596. +#ifdef CONFIG_NET_POLL_CONTROLLER
  597. +static void e100_netpoll(struct net_device* dev);
  598. +#endif
  599. +
  600. +static int autoneg_normal = 1;
  601. +
  602. +struct transceiver_ops transceivers[] =
  603. +{
  604. +#if defined(CONFIG_ETRAX_NO_PHY)
  605. + {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
  606. +#else
  607. + {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
  608. + {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
  609. + {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
  610. + {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
  611. + {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
  612. +#endif
  613. +};
  614. +
  615. +struct transceiver_ops* transceiver = &transceivers[0];
  616. +
  617. +static const struct net_device_ops e100_netdev_ops = {
  618. + .ndo_open = e100_open,
  619. + .ndo_stop = e100_close,
  620. + .ndo_start_xmit = e100_send_packet,
  621. + .ndo_tx_timeout = e100_tx_timeout,
  622. + .ndo_get_stats = e100_get_stats,
  623. + .ndo_set_multicast_list = set_multicast_list,
  624. + .ndo_do_ioctl = e100_ioctl,
  625. + .ndo_set_mac_address = e100_set_mac_address,
  626. + .ndo_validate_addr = eth_validate_addr,
  627. + .ndo_change_mtu = eth_change_mtu,
  628. + .ndo_set_config = e100_set_config,
  629. +#ifdef CONFIG_NET_POLL_CONTROLLER
  630. + .ndo_poll_controller = e100_netpoll,
  631. +#endif
  632. +};
  633. +
  634. +#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
  635. +
  636. +/*
  637. + * Check for a network adaptor of this type, and return '0' if one exists.
  638. + * If dev->base_addr == 0, probe all likely locations.
  639. + * If dev->base_addr == 1, always return failure.
  640. + * If dev->base_addr == 2, allocate space for the device and return success
  641. + * (detachable devices only).
  642. + */
  643. +
  644. +static int __init
  645. +etrax_ethernet_init(void)
  646. +{
  647. + struct net_device *dev;
  648. + struct net_local* np;
  649. + int i, err;
  650. +
  651. + printk(KERN_INFO
  652. + "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
  653. +
  654. + if (cris_request_io_interface(if_eth, cardname)) {
  655. + printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
  656. + return -EBUSY;
  657. + }
  658. +
  659. + dev = alloc_etherdev(sizeof(struct net_local));
  660. + if (!dev)
  661. + return -ENOMEM;
  662. +
  663. + np = netdev_priv(dev);
  664. +
  665. + /* we do our own locking */
  666. + dev->features |= NETIF_F_LLTX;
  667. +
  668. + dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
  669. +
  670. + /* now setup our etrax specific stuff */
  671. +
  672. + dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
  673. + dev->dma = NETWORK_RX_DMA_NBR;
  674. +
  675. + /* fill in our handlers so the network layer can talk to us in the future */
  676. +
  677. + dev->ethtool_ops = &e100_ethtool_ops;
  678. + dev->netdev_ops = &e100_netdev_ops;
  679. +
  680. + spin_lock_init(&np->lock);
  681. + spin_lock_init(&np->led_lock);
  682. + spin_lock_init(&np->transceiver_lock);
  683. +
  684. + /* Initialise the list of Etrax DMA-descriptors */
  685. +
  686. + /* Initialise receive descriptors */
  687. +
  688. + for (i = 0; i < NBR_OF_RX_DESC; i++) {
  689. + /* Allocate two extra cachelines to make sure that buffer used
  690. + * by DMA does not share cacheline with any other data (to
  691. + * avoid cache bug)
  692. + */
  693. + RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
  694. + if (!RxDescList[i].skb)
  695. + return -ENOMEM;
  696. + RxDescList[i].descr.ctrl = 0;
  697. + RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
  698. + RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]);
  699. + RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
  700. + RxDescList[i].descr.status = 0;
  701. + RxDescList[i].descr.hw_len = 0;
  702. + prepare_rx_descriptor(&RxDescList[i].descr);
  703. + }
  704. +
  705. + RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol;
  706. + RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]);
  707. + rx_queue_len = 0;
  708. +
  709. + /* Initialize transmit descriptors */
  710. + for (i = 0; i < NBR_OF_TX_DESC; i++) {
  711. + TxDescList[i].descr.ctrl = 0;
  712. + TxDescList[i].descr.sw_len = 0;
  713. + TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr);
  714. + TxDescList[i].descr.buf = 0;
  715. + TxDescList[i].descr.status = 0;
  716. + TxDescList[i].descr.hw_len = 0;
  717. + TxDescList[i].skb = 0;
  718. + }
  719. +
  720. + TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol;
  721. + TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr);
  722. +
  723. + /* Initialise initial pointers */
  724. +
  725. + myNextRxDesc = &RxDescList[0];
  726. + myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
  727. + myFirstTxDesc = &TxDescList[0];
  728. + myNextTxDesc = &TxDescList[0];
  729. + myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
  730. +
  731. + /* Register device */
  732. + err = register_netdev(dev);
  733. + if (err) {
  734. + free_netdev(dev);
  735. + return err;
  736. + }
  737. +
  738. + /* set the default MAC address */
  739. +
  740. + e100_set_mac_address(dev, &default_mac);
  741. +
  742. + /* Initialize speed indicator stuff. */
  743. +
  744. + current_speed = 10;
  745. + current_speed_selection = 0; /* Auto */
  746. + speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
  747. + speed_timer.data = (unsigned long)dev;
  748. + speed_timer.function = e100_check_speed;
  749. +
  750. + clear_led_timer.function = e100_clear_network_leds;
  751. + clear_led_timer.data = (unsigned long)dev;
  752. +
  753. + full_duplex = 0;
  754. + current_duplex = autoneg;
  755. + duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
  756. + duplex_timer.data = (unsigned long)dev;
  757. + duplex_timer.function = e100_check_duplex;
  758. +
  759. + /* Initialize mii interface */
  760. + np->mii_if.phy_id_mask = 0x1f;
  761. + np->mii_if.reg_num_mask = 0x1f;
  762. + np->mii_if.dev = dev;
  763. + np->mii_if.mdio_read = e100_get_mdio_reg;
  764. + np->mii_if.mdio_write = e100_set_mdio_reg;
  765. +
  766. + /* Initialize group address registers to make sure that no */
  767. + /* unwanted addresses are matched */
  768. + *R_NETWORK_GA_0 = 0x00000000;
  769. + *R_NETWORK_GA_1 = 0x00000000;
  770. +
  771. + /* Initialize next time the led can flash */
  772. + led_next_time = jiffies;
  773. + return 0;
  774. +}
  775. +
  776. +/* set MAC address of the interface. called from the core after a
  777. + * SIOCSIFADDR ioctl, and from the bootup above.
  778. + */
  779. +
  780. +static int
  781. +e100_set_mac_address(struct net_device *dev, void *p)
  782. +{
  783. + struct net_local *np = netdev_priv(dev);
  784. + struct sockaddr *addr = p;
  785. +
  786. + spin_lock(&np->lock); /* preemption protection */
  787. +
  788. + /* remember it */
  789. +
  790. + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  791. +
  792. + /* Write it to the hardware.
  793. + * Note the way the address is wrapped:
  794. + * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
  795. + * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
  796. + */
  797. +
  798. + *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
  799. + (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
  800. + *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
  801. + *R_NETWORK_SA_2 = 0;
  802. +
  803. + /* show it in the log as well */
  804. +
  805. + printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);
  806. +
  807. + spin_unlock(&np->lock);
  808. +
  809. + return 0;
  810. +}
  811. +
  812. +/*
  813. + * Open/initialize the board. This is called (in the current kernel)
  814. + * sometime after booting when the 'ifconfig' program is run.
  815. + *
  816. + * This routine should set everything up anew at each open, even
  817. + * registers that "should" only need to be set once at boot, so that
  818. + * there is non-reboot way to recover if something goes wrong.
  819. + */
  820. +
  821. +static int
  822. +e100_open(struct net_device *dev)
  823. +{
  824. + unsigned long flags;
  825. +
  826. + /* enable the MDIO output pin */
  827. +
  828. + *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);
  829. +
  830. + *R_IRQ_MASK0_CLR =
  831. + IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
  832. + IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
  833. + IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
  834. +
  835. + /* clear dma0 and 1 eop and descr irq masks */
  836. + *R_IRQ_MASK2_CLR =
  837. + IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
  838. + IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
  839. + IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
  840. + IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
  841. +
  842. + /* Reset and wait for the DMA channels */
  843. +
  844. + RESET_DMA(NETWORK_TX_DMA_NBR);
  845. + RESET_DMA(NETWORK_RX_DMA_NBR);
  846. + WAIT_DMA(NETWORK_TX_DMA_NBR);
  847. + WAIT_DMA(NETWORK_RX_DMA_NBR);
  848. +
  849. + /* Initialise the etrax network controller */
  850. +
  851. + /* allocate the irq corresponding to the receiving DMA */
  852. +
  853. + if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt,
  854. + IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) {
  855. + goto grace_exit0;
  856. + }
  857. +
  858. + /* allocate the irq corresponding to the transmitting DMA */
  859. +
  860. + if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
  861. + cardname, (void *)dev)) {
  862. + goto grace_exit1;
  863. + }
  864. +
  865. + /* allocate the irq corresponding to the network errors etc */
  866. +
  867. + if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
  868. + cardname, (void *)dev)) {
  869. + goto grace_exit2;
  870. + }
  871. +
  872. + /*
  873. + * Always allocate the DMA channels after the IRQ,
  874. + * and clean up on failure.
  875. + */
  876. +
  877. + if (cris_request_dma(NETWORK_TX_DMA_NBR,
  878. + cardname,
  879. + DMA_VERBOSE_ON_ERROR,
  880. + dma_eth)) {
  881. + goto grace_exit3;
  882. + }
  883. +
  884. + if (cris_request_dma(NETWORK_RX_DMA_NBR,
  885. + cardname,
  886. + DMA_VERBOSE_ON_ERROR,
  887. + dma_eth)) {
  888. + goto grace_exit4;
  889. + }
  890. +
  891. + /* give the HW an idea of what MAC address we want */
  892. +
  893. + *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
  894. + (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
  895. + *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
  896. + *R_NETWORK_SA_2 = 0;
  897. +
  898. +#if 0
  899. + /* use promiscuous mode for testing */
  900. + *R_NETWORK_GA_0 = 0xffffffff;
  901. + *R_NETWORK_GA_1 = 0xffffffff;
  902. +
  903. + *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
  904. +#else
  905. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
  906. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
  907. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
  908. + SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
  909. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  910. +#endif
  911. +
  912. + *R_NETWORK_GEN_CONFIG =
  913. + IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) |
  914. + IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);
  915. +
  916. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
  917. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
  918. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
  919. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
  920. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
  921. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
  922. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
  923. + *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
  924. +
  925. + local_irq_save(flags);
  926. +
  927. + /* enable the irq's for ethernet DMA */
  928. +
  929. + *R_IRQ_MASK2_SET =
  930. + IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
  931. + IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
  932. +
  933. + *R_IRQ_MASK0_SET =
  934. + IO_STATE(R_IRQ_MASK0_SET, overrun, set) |
  935. + IO_STATE(R_IRQ_MASK0_SET, underrun, set) |
  936. + IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);
  937. +
  938. + /* make sure the irqs are cleared */
  939. +
  940. + *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
  941. + *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
  942. +
  943. + /* make sure the rec and transmit error counters are cleared */
  944. +
  945. + (void)*R_REC_COUNTERS; /* dummy read */
  946. + (void)*R_TR_COUNTERS; /* dummy read */
  947. +
  948. + /* start the receiving DMA channel so we can receive packets from now on */
  949. +
  950. + *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
  951. + *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);
  952. +
  953. + /* Set up transmit DMA channel so it can be restarted later */
  954. +
  955. + *R_DMA_CH0_FIRST = 0;
  956. + *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
  957. + netif_start_queue(dev);
  958. +
  959. + local_irq_restore(flags);
  960. +
  961. + /* Probe for transceiver */
  962. + if (e100_probe_transceiver(dev))
  963. + goto grace_exit5;
  964. +
  965. + /* Start duplex/speed timers */
  966. + add_timer(&speed_timer);
  967. + add_timer(&duplex_timer);
  968. +
  969. + /* We are now ready to accept transmit requeusts from
  970. + * the queueing layer of the networking.
  971. + */
  972. + netif_carrier_on(dev);
  973. +
  974. + return 0;
  975. +
  976. +grace_exit5:
  977. + cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
  978. +grace_exit4:
  979. + cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
  980. +grace_exit3:
  981. + free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
  982. +grace_exit2:
  983. + free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
  984. +grace_exit1:
  985. + free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
  986. +grace_exit0:
  987. + return -EAGAIN;
  988. +}
  989. +
  990. +#if defined(CONFIG_ETRAX_NO_PHY)
  991. +static void
  992. +dummy_check_speed(struct net_device* dev)
  993. +{
  994. + current_speed = 100;
  995. +}
  996. +#else
  997. +static void
  998. +generic_check_speed(struct net_device* dev)
  999. +{
  1000. + unsigned long data;
  1001. + struct net_local *np = netdev_priv(dev);
  1002. +
  1003. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
  1004. + if ((data & ADVERTISE_100FULL) ||
  1005. + (data & ADVERTISE_100HALF))
  1006. + current_speed = 100;
  1007. + else
  1008. + current_speed = 10;
  1009. +}
  1010. +
  1011. +static void
  1012. +tdk_check_speed(struct net_device* dev)
  1013. +{
  1014. + unsigned long data;
  1015. + struct net_local *np = netdev_priv(dev);
  1016. +
  1017. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1018. + MDIO_TDK_DIAGNOSTIC_REG);
  1019. + current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
  1020. +}
  1021. +
  1022. +static void
  1023. +broadcom_check_speed(struct net_device* dev)
  1024. +{
  1025. + unsigned long data;
  1026. + struct net_local *np = netdev_priv(dev);
  1027. +
  1028. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1029. + MDIO_AUX_CTRL_STATUS_REG);
  1030. + current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
  1031. +}
  1032. +
  1033. +static void
  1034. +intel_check_speed(struct net_device* dev)
  1035. +{
  1036. + unsigned long data;
  1037. + struct net_local *np = netdev_priv(dev);
  1038. +
  1039. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1040. + MDIO_INT_STATUS_REG_2);
  1041. + current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
  1042. +}
  1043. +#endif
  1044. +static void
  1045. +e100_check_speed(unsigned long priv)
  1046. +{
  1047. + struct net_device* dev = (struct net_device*)priv;
  1048. + struct net_local *np = netdev_priv(dev);
  1049. + static int led_initiated = 0;
  1050. + unsigned long data;
  1051. + int old_speed = current_speed;
  1052. +
  1053. + spin_lock(&np->transceiver_lock);
  1054. +
  1055. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
  1056. + if (!(data & BMSR_LSTATUS)) {
  1057. + current_speed = 0;
  1058. + } else {
  1059. + transceiver->check_speed(dev);
  1060. + }
  1061. +
  1062. + spin_lock(&np->led_lock);
  1063. + if ((old_speed != current_speed) || !led_initiated) {
  1064. + led_initiated = 1;
  1065. + e100_set_network_leds(NO_NETWORK_ACTIVITY);
  1066. + if (current_speed)
  1067. + netif_carrier_on(dev);
  1068. + else
  1069. + netif_carrier_off(dev);
  1070. + }
  1071. + spin_unlock(&np->led_lock);
  1072. +
  1073. + /* Reinitialize the timer. */
  1074. + speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
  1075. + add_timer(&speed_timer);
  1076. +
  1077. + spin_unlock(&np->transceiver_lock);
  1078. +}
  1079. +
  1080. +static void
  1081. +e100_negotiate(struct net_device* dev)
  1082. +{
  1083. + struct net_local *np = netdev_priv(dev);
  1084. + unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1085. + MII_ADVERTISE);
  1086. +
  1087. + /* Discard old speed and duplex settings */
  1088. + data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
  1089. + ADVERTISE_10HALF | ADVERTISE_10FULL);
  1090. +
  1091. + switch (current_speed_selection) {
  1092. + case 10:
  1093. + if (current_duplex == full)
  1094. + data |= ADVERTISE_10FULL;
  1095. + else if (current_duplex == half)
  1096. + data |= ADVERTISE_10HALF;
  1097. + else
  1098. + data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
  1099. + break;
  1100. +
  1101. + case 100:
  1102. + if (current_duplex == full)
  1103. + data |= ADVERTISE_100FULL;
  1104. + else if (current_duplex == half)
  1105. + data |= ADVERTISE_100HALF;
  1106. + else
  1107. + data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
  1108. + break;
  1109. +
  1110. + case 0: /* Auto */
  1111. + if (current_duplex == full)
  1112. + data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
  1113. + else if (current_duplex == half)
  1114. + data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
  1115. + else
  1116. + data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
  1117. + ADVERTISE_100HALF | ADVERTISE_100FULL;
  1118. + break;
  1119. +
  1120. + default: /* assume autoneg speed and duplex */
  1121. + data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
  1122. + ADVERTISE_100HALF | ADVERTISE_100FULL;
  1123. + break;
  1124. + }
  1125. +
  1126. + e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
  1127. +
  1128. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
  1129. + if (autoneg_normal) {
  1130. + /* Renegotiate with link partner */
  1131. + data |= BMCR_ANENABLE | BMCR_ANRESTART;
  1132. + } else {
  1133. + /* Don't negotiate speed or duplex */
  1134. + data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
  1135. +
  1136. + /* Set speed and duplex static */
  1137. + if (current_speed_selection == 10)
  1138. + data &= ~BMCR_SPEED100;
  1139. + else
  1140. + data |= BMCR_SPEED100;
  1141. +
  1142. + if (current_duplex != full)
  1143. + data &= ~BMCR_FULLDPLX;
  1144. + else
  1145. + data |= BMCR_FULLDPLX;
  1146. + }
  1147. + e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
  1148. +}
  1149. +
  1150. +static void
  1151. +e100_set_speed(struct net_device* dev, unsigned long speed)
  1152. +{
  1153. + struct net_local *np = netdev_priv(dev);
  1154. +
  1155. + spin_lock(&np->transceiver_lock);
  1156. + if (speed != current_speed_selection) {
  1157. + current_speed_selection = speed;
  1158. + e100_negotiate(dev);
  1159. + }
  1160. + spin_unlock(&np->transceiver_lock);
  1161. +}
  1162. +
  1163. +static void
  1164. +e100_check_duplex(unsigned long priv)
  1165. +{
  1166. + struct net_device *dev = (struct net_device *)priv;
  1167. + struct net_local *np = netdev_priv(dev);
  1168. + int old_duplex;
  1169. +
  1170. + spin_lock(&np->transceiver_lock);
  1171. + old_duplex = full_duplex;
  1172. + transceiver->check_duplex(dev);
  1173. + if (old_duplex != full_duplex) {
  1174. + /* Duplex changed */
  1175. + SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
  1176. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  1177. + }
  1178. +
  1179. + /* Reinitialize the timer. */
  1180. + duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
  1181. + add_timer(&duplex_timer);
  1182. + np->mii_if.full_duplex = full_duplex;
  1183. + spin_unlock(&np->transceiver_lock);
  1184. +}
  1185. +#if defined(CONFIG_ETRAX_NO_PHY)
  1186. +static void
  1187. +dummy_check_duplex(struct net_device* dev)
  1188. +{
  1189. + full_duplex = 1;
  1190. +}
  1191. +#else
  1192. +static void
  1193. +generic_check_duplex(struct net_device* dev)
  1194. +{
  1195. + unsigned long data;
  1196. + struct net_local *np = netdev_priv(dev);
  1197. +
  1198. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
  1199. + if ((data & ADVERTISE_10FULL) ||
  1200. + (data & ADVERTISE_100FULL))
  1201. + full_duplex = 1;
  1202. + else
  1203. + full_duplex = 0;
  1204. +}
  1205. +
  1206. +static void
  1207. +tdk_check_duplex(struct net_device* dev)
  1208. +{
  1209. + unsigned long data;
  1210. + struct net_local *np = netdev_priv(dev);
  1211. +
  1212. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1213. + MDIO_TDK_DIAGNOSTIC_REG);
  1214. + full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
  1215. +}
  1216. +
  1217. +static void
  1218. +broadcom_check_duplex(struct net_device* dev)
  1219. +{
  1220. + unsigned long data;
  1221. + struct net_local *np = netdev_priv(dev);
  1222. +
  1223. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1224. + MDIO_AUX_CTRL_STATUS_REG);
  1225. + full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
  1226. +}
  1227. +
  1228. +static void
  1229. +intel_check_duplex(struct net_device* dev)
  1230. +{
  1231. + unsigned long data;
  1232. + struct net_local *np = netdev_priv(dev);
  1233. +
  1234. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
  1235. + MDIO_INT_STATUS_REG_2);
  1236. + full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
  1237. +}
  1238. +#endif
  1239. +static void
  1240. +e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
  1241. +{
  1242. + struct net_local *np = netdev_priv(dev);
  1243. +
  1244. + spin_lock(&np->transceiver_lock);
  1245. + if (new_duplex != current_duplex) {
  1246. + current_duplex = new_duplex;
  1247. + e100_negotiate(dev);
  1248. + }
  1249. + spin_unlock(&np->transceiver_lock);
  1250. +}
  1251. +
  1252. +static int
  1253. +e100_probe_transceiver(struct net_device* dev)
  1254. +{
  1255. + int ret = 0;
  1256. +
  1257. +#if !defined(CONFIG_ETRAX_NO_PHY)
  1258. + unsigned int phyid_high;
  1259. + unsigned int phyid_low;
  1260. + unsigned int oui;
  1261. + struct transceiver_ops* ops = NULL;
  1262. + struct net_local *np = netdev_priv(dev);
  1263. +
  1264. + spin_lock(&np->transceiver_lock);
  1265. +
  1266. + /* Probe MDIO physical address */
  1267. + for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
  1268. + np->mii_if.phy_id++) {
  1269. + if (e100_get_mdio_reg(dev,
  1270. + np->mii_if.phy_id, MII_BMSR) != 0xffff)
  1271. + break;
  1272. + }
  1273. + if (np->mii_if.phy_id == 32) {
  1274. + ret = -ENODEV;
  1275. + goto out;
  1276. + }
  1277. +
  1278. + /* Get manufacturer */
  1279. + phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
  1280. + phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
  1281. + oui = (phyid_high << 6) | (phyid_low >> 10);
  1282. +
  1283. + for (ops = &transceivers[0]; ops->oui; ops++) {
  1284. + if (ops->oui == oui)
  1285. + break;
  1286. + }
  1287. + transceiver = ops;
  1288. +out:
  1289. + spin_unlock(&np->transceiver_lock);
  1290. +#endif
  1291. + return ret;
  1292. +}
  1293. +
  1294. +static int
  1295. +e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
  1296. +{
  1297. + unsigned short cmd; /* Data to be sent on MDIO port */
  1298. + int data; /* Data read from MDIO */
  1299. + int bitCounter;
  1300. +
  1301. + /* Start of frame, OP Code, Physical Address, Register Address */
  1302. + cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
  1303. + (location << 2);
  1304. +
  1305. + e100_send_mdio_cmd(cmd, 0);
  1306. +
  1307. + data = 0;
  1308. +
  1309. + /* Data... */
  1310. + for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
  1311. + data |= (e100_receive_mdio_bit() << bitCounter);
  1312. + }
  1313. +
  1314. + return data;
  1315. +}
  1316. +
  1317. +static void
  1318. +e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
  1319. +{
  1320. + int bitCounter;
  1321. + unsigned short cmd;
  1322. +
  1323. + cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
  1324. + (location << 2);
  1325. +
  1326. + e100_send_mdio_cmd(cmd, 1);
  1327. +
  1328. + /* Data... */
  1329. + for (bitCounter=15; bitCounter>=0 ; bitCounter--) {
  1330. + e100_send_mdio_bit(GET_BIT(bitCounter, value));
  1331. + }
  1332. +
  1333. +}
  1334. +
  1335. +static void
  1336. +e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
  1337. +{
  1338. + int bitCounter;
  1339. + unsigned char data = 0x2;
  1340. +
  1341. + /* Preamble */
  1342. + for (bitCounter = 31; bitCounter>= 0; bitCounter--)
  1343. + e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));
  1344. +
  1345. + for (bitCounter = 15; bitCounter >= 2; bitCounter--)
  1346. + e100_send_mdio_bit(GET_BIT(bitCounter, cmd));
  1347. +
  1348. + /* Turnaround */
  1349. + for (bitCounter = 1; bitCounter >= 0 ; bitCounter--)
  1350. + if (write_cmd)
  1351. + e100_send_mdio_bit(GET_BIT(bitCounter, data));
  1352. + else
  1353. + e100_receive_mdio_bit();
  1354. +}
  1355. +
  1356. +static void
  1357. +e100_send_mdio_bit(unsigned char bit)
  1358. +{
  1359. + *R_NETWORK_MGM_CTRL =
  1360. + IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
  1361. + IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
  1362. + udelay(1);
  1363. + *R_NETWORK_MGM_CTRL =
  1364. + IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
  1365. + IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
  1366. + IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
  1367. + udelay(1);
  1368. +}
  1369. +
  1370. +static unsigned char
  1371. +e100_receive_mdio_bit()
  1372. +{
  1373. + unsigned char bit;
  1374. + *R_NETWORK_MGM_CTRL = 0;
  1375. + bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
  1376. + udelay(1);
  1377. + *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
  1378. + udelay(1);
  1379. + return bit;
  1380. +}
  1381. +
  1382. +static void
  1383. +e100_reset_transceiver(struct net_device* dev)
  1384. +{
  1385. + struct net_local *np = netdev_priv(dev);
  1386. + unsigned short cmd;
  1387. + unsigned short data;
  1388. + int bitCounter;
  1389. +
  1390. + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
  1391. +
  1392. + cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
  1393. +
  1394. + e100_send_mdio_cmd(cmd, 1);
  1395. +
  1396. + data |= 0x8000;
  1397. +
  1398. + for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) {
  1399. + e100_send_mdio_bit(GET_BIT(bitCounter, data));
  1400. + }
  1401. +}
  1402. +
  1403. +/* Called by upper layers if they decide it took too long to complete
  1404. + * sending a packet - we need to reset and stuff.
  1405. + */
  1406. +
  1407. +static void
  1408. +e100_tx_timeout(struct net_device *dev)
  1409. +{
  1410. + struct net_local *np = netdev_priv(dev);
  1411. + unsigned long flags;
  1412. +
  1413. + spin_lock_irqsave(&np->lock, flags);
  1414. +
  1415. + printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
  1416. + tx_done(dev) ? "IRQ problem" : "network cable problem");
  1417. +
  1418. + /* remember we got an error */
  1419. +
  1420. + np->stats.tx_errors++;
  1421. +
  1422. + /* reset the TX DMA in case it has hung on something */
  1423. +
  1424. + RESET_DMA(NETWORK_TX_DMA_NBR);
  1425. + WAIT_DMA(NETWORK_TX_DMA_NBR);
  1426. +
  1427. + /* Reset the transceiver. */
  1428. +
  1429. + e100_reset_transceiver(dev);
  1430. +
  1431. + /* and get rid of the packets that never got an interrupt */
  1432. + while (myFirstTxDesc != myNextTxDesc) {
  1433. + dev_kfree_skb(myFirstTxDesc->skb);
  1434. + myFirstTxDesc->skb = 0;
  1435. + myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
  1436. + }
  1437. +
  1438. + /* Set up transmit DMA channel so it can be restarted later */
  1439. + *R_DMA_CH0_FIRST = 0;
  1440. + *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
  1441. +
  1442. + /* tell the upper layers we're ok again */
  1443. +
  1444. + netif_wake_queue(dev);
  1445. + spin_unlock_irqrestore(&np->lock, flags);
  1446. +}
  1447. +
  1448. +
  1449. +/* This will only be invoked if the driver is _not_ in XOFF state.
  1450. + * What this means is that we need not check it, and that this
  1451. + * invariant will hold if we make sure that the netif_*_queue()
  1452. + * calls are done at the proper times.
  1453. + */
  1454. +
  1455. +static int
  1456. +e100_send_packet(struct sk_buff *skb, struct net_device *dev)
  1457. +{
  1458. + struct net_local *np = netdev_priv(dev);
  1459. + unsigned char *buf = skb->data;
  1460. + unsigned long flags;
  1461. +
  1462. +#ifdef ETHDEBUG
  1463. + printk("send packet len %d\n", length);
  1464. +#endif
  1465. + spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */
  1466. +
  1467. + myNextTxDesc->skb = skb;
  1468. +
  1469. + dev->trans_start = jiffies;
  1470. +
  1471. + e100_hardware_send_packet(np, buf, skb->len);
  1472. +
  1473. + myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
  1474. +
  1475. + /* Stop queue if full */
  1476. + if (myNextTxDesc == myFirstTxDesc) {
  1477. + netif_stop_queue(dev);
  1478. + }
  1479. +
  1480. + spin_unlock_irqrestore(&np->lock, flags);
  1481. +
  1482. + return NETDEV_TX_OK;
  1483. +}
  1484. +
  1485. +/*
  1486. + * The typical workload of the driver:
  1487. + * Handle the network interface interrupts.
  1488. + */
  1489. +
  1490. +static irqreturn_t
  1491. +e100rxtx_interrupt(int irq, void *dev_id)
  1492. +{
  1493. + struct net_device *dev = (struct net_device *)dev_id;
  1494. + struct net_local *np = netdev_priv(dev);
  1495. + unsigned long irqbits;
  1496. +
  1497. + /*
  1498. + * Note that both rx and tx interrupts are blocked at this point,
  1499. + * regardless of which got us here.
  1500. + */
  1501. +
  1502. + irqbits = *R_IRQ_MASK2_RD;
  1503. +
  1504. + /* Handle received packets */
  1505. + if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
  1506. + /* acknowledge the eop interrupt */
  1507. +
  1508. + *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);
  1509. +
  1510. + /* check if one or more complete packets were indeed received */
  1511. +
  1512. + while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
  1513. + (myNextRxDesc != myLastRxDesc)) {
  1514. + /* Take out the buffer and give it to the OS, then
  1515. + * allocate a new buffer to put a packet in.
  1516. + */
  1517. + e100_rx(dev);
  1518. + np->stats.rx_packets++;
  1519. + /* restart/continue on the channel, for safety */
  1520. + *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
  1521. + /* clear dma channel 1 eop/descr irq bits */
  1522. + *R_DMA_CH1_CLR_INTR =
  1523. + IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
  1524. + IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);
  1525. +
  1526. + /* now, we might have gotten another packet
  1527. + so we have to loop back and check if so */
  1528. + }
  1529. + }
  1530. +
  1531. + /* Report any packets that have been sent */
  1532. + while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
  1533. + (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
  1534. + np->stats.tx_bytes += myFirstTxDesc->skb->len;
  1535. + np->stats.tx_packets++;
  1536. +
  1537. + /* dma is ready with the transmission of the data in tx_skb, so now
  1538. + we can release the skb memory */
  1539. + dev_kfree_skb_irq(myFirstTxDesc->skb);
  1540. + myFirstTxDesc->skb = 0;
  1541. + myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
  1542. + /* Wake up queue. */
  1543. + netif_wake_queue(dev);
  1544. + }
  1545. +
  1546. + if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
  1547. + /* acknowledge the eop interrupt. */
  1548. + *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
  1549. + }
  1550. +
  1551. + return IRQ_HANDLED;
  1552. +}
  1553. +
  1554. +static irqreturn_t
  1555. +e100nw_interrupt(int irq, void *dev_id)
  1556. +{
  1557. + struct net_device *dev = (struct net_device *)dev_id;
  1558. + struct net_local *np = netdev_priv(dev);
  1559. + unsigned long irqbits = *R_IRQ_MASK0_RD;
  1560. +
  1561. + /* check for underrun irq */
  1562. + if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
  1563. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
  1564. + *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
  1565. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
  1566. + np->stats.tx_errors++;
  1567. + D(printk("ethernet receiver underrun!\n"));
  1568. + }
  1569. +
  1570. + /* check for overrun irq */
  1571. + if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
  1572. + update_rx_stats(&np->stats); /* this will ack the irq */
  1573. + D(printk("ethernet receiver overrun!\n"));
  1574. + }
  1575. + /* check for excessive collision irq */
  1576. + if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
  1577. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
  1578. + *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
  1579. + SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
  1580. + np->stats.tx_errors++;
  1581. + D(printk("ethernet excessive collisions!\n"));
  1582. + }
  1583. + return IRQ_HANDLED;
  1584. +}
  1585. +
  1586. +/* We have a good packet(s), get it/them out of the buffers. */
  1587. +static void
  1588. +e100_rx(struct net_device *dev)
  1589. +{
  1590. + struct sk_buff *skb;
  1591. + int length = 0;
  1592. + struct net_local *np = netdev_priv(dev);
  1593. + unsigned char *skb_data_ptr;
  1594. +#ifdef ETHDEBUG
  1595. + int i;
  1596. +#endif
  1597. + etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
  1598. + spin_lock(&np->led_lock);
  1599. + if (!led_active && time_after(jiffies, led_next_time)) {
  1600. + /* light the network leds depending on the current speed. */
  1601. + e100_set_network_leds(NETWORK_ACTIVITY);
  1602. +
  1603. + /* Set the earliest time we may clear the LED */
  1604. + led_next_time = jiffies + NET_FLASH_TIME;
  1605. + led_active = 1;
  1606. + mod_timer(&clear_led_timer, jiffies + HZ/10);
  1607. + }
  1608. + spin_unlock(&np->led_lock);
  1609. +
  1610. + length = myNextRxDesc->descr.hw_len - 4;
  1611. + np->stats.rx_bytes += length;
  1612. +
  1613. +#ifdef ETHDEBUG
  1614. + printk("Got a packet of length %d:\n", length);
  1615. + /* dump the first bytes in the packet */
  1616. + skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
  1617. + for (i = 0; i < 8; i++) {
  1618. + printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
  1619. + skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3],
  1620. + skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]);
  1621. + skb_data_ptr += 8;
  1622. + }
  1623. +#endif
  1624. +
  1625. + if (length < RX_COPYBREAK) {
  1626. + /* Small packet, copy data */
  1627. + skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
  1628. + if (!skb) {
  1629. + np->stats.rx_errors++;
  1630. + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
  1631. + goto update_nextrxdesc;
  1632. + }
  1633. +
  1634. + skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
  1635. + skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */
  1636. +
  1637. +#ifdef ETHDEBUG
  1638. + printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
  1639. + skb->head, skb->data, skb_tail_pointer(skb),
  1640. + skb_end_pointer(skb));
  1641. + printk("copying packet to 0x%x.\n", skb_data_ptr);
  1642. +#endif
  1643. +
  1644. + memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
  1645. + }
  1646. + else {
  1647. + /* Large packet, send directly to upper layers and allocate new
  1648. + * memory (aligned to cache line boundary to avoid bug).
  1649. + * Before sending the skb to upper layers we must make sure
  1650. + * that skb->data points to the aligned start of the packet.
  1651. + */
  1652. + int align;
  1653. + struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
  1654. + if (!new_skb) {
  1655. + np->stats.rx_errors++;
  1656. + printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
  1657. + goto update_nextrxdesc;
  1658. + }
  1659. + skb = myNextRxDesc->skb;
  1660. + align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
  1661. + skb_put(skb, length + align);
  1662. + skb_pull(skb, align); /* Remove alignment bytes */
  1663. + myNextRxDesc->skb = new_skb;
  1664. + myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
  1665. + }
  1666. +
  1667. + skb->protocol = eth_type_trans(skb, dev);
  1668. +
  1669. + /* Send the packet to the upper layers */
  1670. + netif_rx(skb);
  1671. +
  1672. + update_nextrxdesc:
  1673. + /* Prepare for next packet */
  1674. + myNextRxDesc->descr.status = 0;
  1675. + prevRxDesc = myNextRxDesc;
  1676. + myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
  1677. +
  1678. + rx_queue_len++;
  1679. +
  1680. + /* Check if descriptors should be returned */
  1681. + if (rx_queue_len == RX_QUEUE_THRESHOLD) {
  1682. + flush_etrax_cache();
  1683. + prevRxDesc->descr.ctrl |= d_eol;
  1684. + myLastRxDesc->descr.ctrl &= ~d_eol;
  1685. + myLastRxDesc = prevRxDesc;
  1686. + rx_queue_len = 0;
  1687. + }
  1688. +}
  1689. +
  1690. +/* The inverse routine to net_open(). */
  1691. +static int
  1692. +e100_close(struct net_device *dev)
  1693. +{
  1694. + struct net_local *np = netdev_priv(dev);
  1695. +
  1696. + printk(KERN_INFO "Closing %s.\n", dev->name);
  1697. +
  1698. + netif_stop_queue(dev);
  1699. +
  1700. + *R_IRQ_MASK0_CLR =
  1701. + IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
  1702. + IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
  1703. + IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);
  1704. +
  1705. + *R_IRQ_MASK2_CLR =
  1706. + IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
  1707. + IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
  1708. + IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
  1709. + IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
  1710. +
  1711. + /* Stop the receiver and the transmitter */
  1712. +
  1713. + RESET_DMA(NETWORK_TX_DMA_NBR);
  1714. + RESET_DMA(NETWORK_RX_DMA_NBR);
  1715. +
  1716. + /* Flush the Tx and disable Rx here. */
  1717. +
  1718. + free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
  1719. + free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
  1720. + free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
  1721. +
  1722. + cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
  1723. + cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
  1724. +
  1725. + /* Update the statistics here. */
  1726. +
  1727. + update_rx_stats(&np->stats);
  1728. + update_tx_stats(&np->stats);
  1729. +
  1730. + /* Stop speed/duplex timers */
  1731. + del_timer(&speed_timer);
  1732. + del_timer(&duplex_timer);
  1733. +
  1734. + return 0;
  1735. +}
  1736. +
  1737. +static int
  1738. +e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  1739. +{
  1740. + struct mii_ioctl_data *data = if_mii(ifr);
  1741. + struct net_local *np = netdev_priv(dev);
  1742. + int rc = 0;
  1743. + int old_autoneg;
  1744. +
  1745. + spin_lock(&np->lock); /* Preempt protection */
  1746. + switch (cmd) {
  1747. + /* The ioctls below should be considered obsolete but are */
  1748. + /* still present for compatability with old scripts/apps */
  1749. + case SET_ETH_SPEED_10: /* 10 Mbps */
  1750. + e100_set_speed(dev, 10);
  1751. + break;
  1752. + case SET_ETH_SPEED_100: /* 100 Mbps */
  1753. + e100_set_speed(dev, 100);
  1754. + break;
  1755. + case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
  1756. + e100_set_speed(dev, 0);
  1757. + break;
  1758. + case SET_ETH_DUPLEX_HALF: /* Half duplex */
  1759. + e100_set_duplex(dev, half);
  1760. + break;
  1761. + case SET_ETH_DUPLEX_FULL: /* Full duplex */
  1762. + e100_set_duplex(dev, full);
  1763. + break;
  1764. + case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
  1765. + e100_set_duplex(dev, autoneg);
  1766. + break;
  1767. + case SET_ETH_AUTONEG:
  1768. + old_autoneg = autoneg_normal;
  1769. + autoneg_normal = *(int*)data;
  1770. + if (autoneg_normal != old_autoneg)
  1771. + e100_negotiate(dev);
  1772. + break;
  1773. + default:
  1774. + rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
  1775. + cmd, NULL);
  1776. + break;
  1777. + }
  1778. + spin_unlock(&np->lock);
  1779. + return rc;
  1780. +}
  1781. +
  1782. +static int e100_get_settings(struct net_device *dev,
  1783. + struct ethtool_cmd *cmd)
  1784. +{
  1785. + struct net_local *np = netdev_priv(dev);
  1786. + int err;
  1787. +
  1788. + spin_lock_irq(&np->lock);
  1789. + err = mii_ethtool_gset(&np->mii_if, cmd);
  1790. + spin_unlock_irq(&np->lock);
  1791. +
  1792. + /* The PHY may support 1000baseT, but the Etrax100 does not. */
  1793. + cmd->supported &= ~(SUPPORTED_1000baseT_Half
  1794. + | SUPPORTED_1000baseT_Full);
  1795. + return err;
  1796. +}
  1797. +
  1798. +static int e100_set_settings(struct net_device *dev,
  1799. + struct ethtool_cmd *ecmd)
  1800. +{
  1801. + if (ecmd->autoneg == AUTONEG_ENABLE) {
  1802. + e100_set_duplex(dev, autoneg);
  1803. + e100_set_speed(dev, 0);
  1804. + } else {
  1805. + e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
  1806. + e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10: 100);
  1807. + }
  1808. +
  1809. + return 0;
  1810. +}
  1811. +
  1812. +static void e100_get_drvinfo(struct net_device *dev,
  1813. + struct ethtool_drvinfo *info)
  1814. +{
  1815. + strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1);
  1816. + strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1);
  1817. + strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
  1818. + strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
  1819. +}
  1820. +
  1821. +static int e100_nway_reset(struct net_device *dev)
  1822. +{
  1823. + if (current_duplex == autoneg && current_speed_selection == 0)
  1824. + e100_negotiate(dev);
  1825. + return 0;
  1826. +}
  1827. +
  1828. +static const struct ethtool_ops e100_ethtool_ops = {
  1829. + .get_settings = e100_get_settings,
  1830. + .set_settings = e100_set_settings,
  1831. + .get_drvinfo = e100_get_drvinfo,
  1832. + .nway_reset = e100_nway_reset,
  1833. + .get_link = ethtool_op_get_link,
  1834. +};
  1835. +
  1836. +static int
  1837. +e100_set_config(struct net_device *dev, struct ifmap *map)
  1838. +{
  1839. + struct net_local *np = netdev_priv(dev);
  1840. +
  1841. + spin_lock(&np->lock); /* Preempt protection */
  1842. +
  1843. + switch(map->port) {
  1844. + case IF_PORT_UNKNOWN:
  1845. + /* Use autoneg */
  1846. + e100_set_speed(dev, 0);
  1847. + e100_set_duplex(dev, autoneg);
  1848. + break;
  1849. + case IF_PORT_10BASET:
  1850. + e100_set_speed(dev, 10);
  1851. + e100_set_duplex(dev, autoneg);
  1852. + break;
  1853. + case IF_PORT_100BASET:
  1854. + case IF_PORT_100BASETX:
  1855. + e100_set_speed(dev, 100);
  1856. + e100_set_duplex(dev, autoneg);
  1857. + break;
  1858. + case IF_PORT_100BASEFX:
  1859. + case IF_PORT_10BASE2:
  1860. + case IF_PORT_AUI:
  1861. + spin_unlock(&np->lock);
  1862. + return -EOPNOTSUPP;
  1863. + break;
  1864. + default:
  1865. + printk(KERN_ERR "%s: Invalid media selected", dev->name);
  1866. + spin_unlock(&np->lock);
  1867. + return -EINVAL;
  1868. + }
  1869. + spin_unlock(&np->lock);
  1870. + return 0;
  1871. +}
  1872. +
  1873. +static void
  1874. +update_rx_stats(struct net_device_stats *es)
  1875. +{
  1876. + unsigned long r = *R_REC_COUNTERS;
  1877. + /* update stats relevant to reception errors */
  1878. + es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
  1879. + es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
  1880. + es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
  1881. + es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
  1882. +}
  1883. +
  1884. +static void
  1885. +update_tx_stats(struct net_device_stats *es)
  1886. +{
  1887. + unsigned long r = *R_TR_COUNTERS;
  1888. + /* update stats relevant to transmission errors */
  1889. + es->collisions +=
  1890. + IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
  1891. + IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
  1892. +}
  1893. +
  1894. +/*
  1895. + * Get the current statistics.
  1896. + * This may be called with the card open or closed.
  1897. + */
  1898. +static struct net_device_stats *
  1899. +e100_get_stats(struct net_device *dev)
  1900. +{
  1901. + struct net_local *lp = netdev_priv(dev);
  1902. + unsigned long flags;
  1903. +
  1904. + spin_lock_irqsave(&lp->lock, flags);
  1905. +
  1906. + update_rx_stats(&lp->stats);
  1907. + update_tx_stats(&lp->stats);
  1908. +
  1909. + spin_unlock_irqrestore(&lp->lock, flags);
  1910. + return &lp->stats;
  1911. +}
  1912. +
  1913. +/*
  1914. + * Set or clear the multicast filter for this adaptor.
  1915. + * num_addrs == -1 Promiscuous mode, receive all packets
  1916. + * num_addrs == 0 Normal mode, clear multicast list
  1917. + * num_addrs > 0 Multicast mode, receive normal and MC packets,
  1918. + * and do best-effort filtering.
  1919. + */
  1920. +static void
  1921. +set_multicast_list(struct net_device *dev)
  1922. +{
  1923. + struct net_local *lp = netdev_priv(dev);
  1924. + int num_addr = dev->mc_count;
  1925. + unsigned long int lo_bits;
  1926. + unsigned long int hi_bits;
  1927. +
  1928. + spin_lock(&lp->lock);
  1929. + if (dev->flags & IFF_PROMISC) {
  1930. + /* promiscuous mode */
  1931. + lo_bits = 0xfffffffful;
  1932. + hi_bits = 0xfffffffful;
  1933. +
  1934. + /* Enable individual receive */
  1935. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
  1936. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  1937. + } else if (dev->flags & IFF_ALLMULTI) {
  1938. + /* enable all multicasts */
  1939. + lo_bits = 0xfffffffful;
  1940. + hi_bits = 0xfffffffful;
  1941. +
  1942. + /* Disable individual receive */
  1943. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
  1944. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  1945. + } else if (num_addr == 0) {
  1946. + /* Normal, clear the mc list */
  1947. + lo_bits = 0x00000000ul;
  1948. + hi_bits = 0x00000000ul;
  1949. +
  1950. + /* Disable individual receive */
  1951. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
  1952. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  1953. + } else {
  1954. + /* MC mode, receive normal and MC packets */
  1955. + char hash_ix;
  1956. + struct dev_mc_list *dmi = dev->mc_list;
  1957. + int i;
  1958. + char *baddr;
  1959. +
  1960. + lo_bits = 0x00000000ul;
  1961. + hi_bits = 0x00000000ul;
  1962. + for (i = 0; i < num_addr; i++) {
  1963. + /* Calculate the hash index for the GA registers */
  1964. +
  1965. + hash_ix = 0;
  1966. + baddr = dmi->dmi_addr;
  1967. + hash_ix ^= (*baddr) & 0x3f;
  1968. + hash_ix ^= ((*baddr) >> 6) & 0x03;
  1969. + ++baddr;
  1970. + hash_ix ^= ((*baddr) << 2) & 0x03c;
  1971. + hash_ix ^= ((*baddr) >> 4) & 0xf;
  1972. + ++baddr;
  1973. + hash_ix ^= ((*baddr) << 4) & 0x30;
  1974. + hash_ix ^= ((*baddr) >> 2) & 0x3f;
  1975. + ++baddr;
  1976. + hash_ix ^= (*baddr) & 0x3f;
  1977. + hash_ix ^= ((*baddr) >> 6) & 0x03;
  1978. + ++baddr;
  1979. + hash_ix ^= ((*baddr) << 2) & 0x03c;
  1980. + hash_ix ^= ((*baddr) >> 4) & 0xf;
  1981. + ++baddr;
  1982. + hash_ix ^= ((*baddr) << 4) & 0x30;
  1983. + hash_ix ^= ((*baddr) >> 2) & 0x3f;
  1984. +
  1985. + hash_ix &= 0x3f;
  1986. +
  1987. + if (hash_ix >= 32) {
  1988. + hi_bits |= (1 << (hash_ix-32));
  1989. + } else {
  1990. + lo_bits |= (1 << hash_ix);
  1991. + }
  1992. + dmi = dmi->next;
  1993. + }
  1994. + /* Disable individual receive */
  1995. + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
  1996. + *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
  1997. + }
  1998. + *R_NETWORK_GA_0 = lo_bits;
  1999. + *R_NETWORK_GA_1 = hi_bits;
  2000. + spin_unlock(&lp->lock);
  2001. +}
  2002. +
  2003. +void
  2004. +e100_hardware_send_packet(struct net_local *np, char *buf, int length)
  2005. +{
  2006. + D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
  2007. +
  2008. + spin_lock(&np->led_lock);
  2009. + if (!led_active && time_after(jiffies, led_next_time)) {
  2010. + /* light the network leds depending on the current speed. */
  2011. + e100_set_network_leds(NETWORK_ACTIVITY);
  2012. +
  2013. + /* Set the earliest time we may clear the LED */
  2014. + led_next_time = jiffies + NET_FLASH_TIME;
  2015. + led_active = 1;
  2016. + mod_timer(&clear_led_timer, jiffies + HZ/10);
  2017. + }
  2018. + spin_unlock(&np->led_lock);
  2019. +
  2020. + /* configure the tx dma descriptor */
  2021. + myNextTxDesc->descr.sw_len = length;
  2022. + myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
  2023. + myNextTxDesc->descr.buf = virt_to_phys(buf);
  2024. +
  2025. + /* Move end of list */
  2026. + myLastTxDesc->descr.ctrl &= ~d_eol;
  2027. + myLastTxDesc = myNextTxDesc;
  2028. +
  2029. + /* Restart DMA channel */
  2030. + *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
  2031. +}
  2032. +
  2033. +static void
  2034. +e100_clear_network_leds(unsigned long dummy)
  2035. +{
  2036. + struct net_device *dev = (struct net_device *)dummy;
  2037. + struct net_local *np = netdev_priv(dev);
  2038. +
  2039. + spin_lock(&np->led_lock);
  2040. +
  2041. + if (led_active && time_after(jiffies, led_next_time)) {
  2042. + e100_set_network_leds(NO_NETWORK_ACTIVITY);
  2043. +
  2044. + /* Set the earliest time we may set the LED */
  2045. + led_next_time = jiffies + NET_FLASH_PAUSE;
  2046. + led_active = 0;
  2047. + }
  2048. +
  2049. + spin_unlock(&np->led_lock);
  2050. +}
  2051. +
  2052. +static void
  2053. +e100_set_network_leds(int active)
  2054. +{
  2055. +#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
  2056. + int light_leds = (active == NO_NETWORK_ACTIVITY);
  2057. +#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
  2058. + int light_leds = (active == NETWORK_ACTIVITY);
  2059. +#else
  2060. +#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
  2061. +#endif
  2062. +
  2063. + if (!current_speed) {
  2064. + /* Make LED red, link is down */
  2065. +#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION)
  2066. + CRIS_LED_NETWORK_SET(CRIS_LED_RED);
  2067. +#else
  2068. + CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
  2069. +#endif
  2070. + } else if (light_leds) {
  2071. + if (current_speed == 10) {
  2072. + CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
  2073. + } else {
  2074. + CRIS_LED_NETWORK_SET(CRIS_LED_GREEN);
  2075. + }
  2076. + } else {
  2077. + CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
  2078. + }
  2079. +}
  2080. +
  2081. +#ifdef CONFIG_NET_POLL_CONTROLLER
  2082. +static void
  2083. +e100_netpoll(struct net_device* netdev)
  2084. +{
  2085. + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
  2086. +}
  2087. +#endif
  2088. +
  2089. +static int
  2090. +etrax_init_module(void)
  2091. +{
  2092. + return etrax_ethernet_init();
  2093. +}
  2094. +
  2095. +static int __init
  2096. +e100_boot_setup(char* str)
  2097. +{
  2098. + struct sockaddr sa = {0};
  2099. + int i;
  2100. +
  2101. + /* Parse the colon separated Ethernet station address */
  2102. + for (i = 0; i < ETH_ALEN; i++) {
  2103. + unsigned int tmp;
  2104. + if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
  2105. + printk(KERN_WARNING "Malformed station address");
  2106. + return 0;
  2107. + }
  2108. + sa.sa_data[i] = (char)tmp;
  2109. + }
  2110. +
  2111. + default_mac = sa;
  2112. + return 1;
  2113. +}
  2114. +
  2115. +__setup("etrax100_eth=", e100_boot_setup);
  2116. +
  2117. +module_init(etrax_init_module);
  2118. diff -Nur linux-2.6.32.orig/drivers/serial/crisv10.c linux-2.6.32/drivers/serial/crisv10.c
  2119. --- linux-2.6.32.orig/drivers/serial/crisv10.c 2009-12-03 04:51:21.000000000 +0100
  2120. +++ linux-2.6.32/drivers/serial/crisv10.c 2010-01-10 13:41:59.276309474 +0100
  2121. @@ -13,6 +13,7 @@
  2122. #include <linux/errno.h>
  2123. #include <linux/signal.h>
  2124. #include <linux/sched.h>
  2125. +#include <linux/smp_lock.h>
  2126. #include <linux/timer.h>
  2127. #include <linux/interrupt.h>
  2128. #include <linux/tty.h>
  2129. @@ -27,6 +28,7 @@
  2130. #include <linux/kernel.h>
  2131. #include <linux/mutex.h>
  2132. #include <linux/bitops.h>
  2133. +#include <linux/device.h>
  2134. #include <linux/seq_file.h>
  2135. #include <linux/delay.h>
  2136. #include <linux/module.h>
  2137. @@ -4415,6 +4417,7 @@
  2138. #endif
  2139. };
  2140. +static struct class *rs_class;
  2141. static int __init rs_init(void)
  2142. {
  2143. int i;
  2144. @@ -4548,6 +4551,24 @@
  2145. #endif
  2146. #endif /* CONFIG_SVINTO_SIM */
  2147. + rs_class = class_create(THIS_MODULE, "rs_tty");
  2148. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  2149. + device_create(rs_class, NULL,
  2150. + MKDEV(TTY_MAJOR, 64), NULL, "ttyS0");
  2151. +#endif
  2152. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  2153. + device_create(rs_class, NULL,
  2154. + MKDEV(TTY_MAJOR, 65), NULL, "ttyS1");
  2155. +#endif
  2156. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  2157. + device_create(rs_class, NULL,
  2158. + MKDEV(TTY_MAJOR, 66), NULL, "ttyS2");
  2159. +#endif
  2160. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  2161. + device_create(rs_class, NULL,
  2162. + MKDEV(TTY_MAJOR, 67), NULL, "ttyS3");
  2163. +#endif
  2164. +
  2165. return 0;
  2166. }
  2167. diff -Nur linux-2.6.32.orig/drivers/serial/crisv10.c.orig linux-2.6.32/drivers/serial/crisv10.c.orig
  2168. --- linux-2.6.32.orig/drivers/serial/crisv10.c.orig 1970-01-01 01:00:00.000000000 +0100
  2169. +++ linux-2.6.32/drivers/serial/crisv10.c.orig 2009-12-03 04:51:21.000000000 +0100
  2170. @@ -0,0 +1,4556 @@
  2171. +/*
  2172. + * Serial port driver for the ETRAX 100LX chip
  2173. + *
  2174. + * Copyright (C) 1998-2007 Axis Communications AB
  2175. + *
  2176. + * Many, many authors. Based once upon a time on serial.c for 16x50.
  2177. + *
  2178. + */
  2179. +
  2180. +static char *serial_version = "$Revision: 1.25 $";
  2181. +
  2182. +#include <linux/types.h>
  2183. +#include <linux/errno.h>
  2184. +#include <linux/signal.h>
  2185. +#include <linux/sched.h>
  2186. +#include <linux/timer.h>
  2187. +#include <linux/interrupt.h>
  2188. +#include <linux/tty.h>
  2189. +#include <linux/tty_flip.h>
  2190. +#include <linux/major.h>
  2191. +#include <linux/smp_lock.h>
  2192. +#include <linux/string.h>
  2193. +#include <linux/fcntl.h>
  2194. +#include <linux/mm.h>
  2195. +#include <linux/slab.h>
  2196. +#include <linux/init.h>
  2197. +#include <linux/kernel.h>
  2198. +#include <linux/mutex.h>
  2199. +#include <linux/bitops.h>
  2200. +#include <linux/seq_file.h>
  2201. +#include <linux/delay.h>
  2202. +#include <linux/module.h>
  2203. +#include <linux/uaccess.h>
  2204. +#include <linux/io.h>
  2205. +
  2206. +#include <asm/irq.h>
  2207. +#include <asm/dma.h>
  2208. +#include <asm/system.h>
  2209. +
  2210. +#include <arch/svinto.h>
  2211. +
  2212. +/* non-arch dependent serial structures are in linux/serial.h */
  2213. +#include <linux/serial.h>
  2214. +/* while we keep our own stuff (struct e100_serial) in a local .h file */
  2215. +#include "crisv10.h"
  2216. +#include <asm/fasttimer.h>
  2217. +#include <arch/io_interface_mux.h>
  2218. +
  2219. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  2220. +#ifndef CONFIG_ETRAX_FAST_TIMER
  2221. +#error "Enable FAST_TIMER to use SERIAL_FAST_TIMER"
  2222. +#endif
  2223. +#endif
  2224. +
  2225. +#if defined(CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS) && \
  2226. + (CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS == 0)
  2227. +#error "RX_TIMEOUT_TICKS == 0 not allowed, use 1"
  2228. +#endif
  2229. +
  2230. +#if defined(CONFIG_ETRAX_RS485_ON_PA) && defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  2231. +#error "Disable either CONFIG_ETRAX_RS485_ON_PA or CONFIG_ETRAX_RS485_ON_PORT_G"
  2232. +#endif
  2233. +
  2234. +/*
  2235. + * All of the compatibility code so we can compile serial.c against
  2236. + * older kernels is hidden in serial_compat.h
  2237. + */
  2238. +#if defined(LOCAL_HEADERS)
  2239. +#include "serial_compat.h"
  2240. +#endif
  2241. +
  2242. +struct tty_driver *serial_driver;
  2243. +
  2244. +/* number of characters left in xmit buffer before we ask for more */
  2245. +#define WAKEUP_CHARS 256
  2246. +
  2247. +//#define SERIAL_DEBUG_INTR
  2248. +//#define SERIAL_DEBUG_OPEN
  2249. +//#define SERIAL_DEBUG_FLOW
  2250. +//#define SERIAL_DEBUG_DATA
  2251. +//#define SERIAL_DEBUG_THROTTLE
  2252. +//#define SERIAL_DEBUG_IO /* Debug for Extra control and status pins */
  2253. +//#define SERIAL_DEBUG_LINE 0 /* What serport we want to debug */
  2254. +
  2255. +/* Enable this to use serial interrupts to handle when you
  2256. + expect the first received event on the serial port to
  2257. + be an error, break or similar. Used to be able to flash IRMA
  2258. + from eLinux */
  2259. +#define SERIAL_HANDLE_EARLY_ERRORS
  2260. +
  2261. +/* Currently 16 descriptors x 128 bytes = 2048 bytes */
  2262. +#define SERIAL_DESCR_BUF_SIZE 256
  2263. +
  2264. +#define SERIAL_PRESCALE_BASE 3125000 /* 3.125MHz */
  2265. +#define DEF_BAUD_BASE SERIAL_PRESCALE_BASE
  2266. +
  2267. +/* We don't want to load the system with massive fast timer interrupt
  2268. + * on high baudrates so limit it to 250 us (4kHz) */
  2269. +#define MIN_FLUSH_TIME_USEC 250
  2270. +
  2271. +/* Add an x here to log a lot of timer stuff */
  2272. +#define TIMERD(x)
  2273. +/* Debug details of interrupt handling */
  2274. +#define DINTR1(x) /* irq on/off, errors */
  2275. +#define DINTR2(x) /* tx and rx */
  2276. +/* Debug flip buffer stuff */
  2277. +#define DFLIP(x)
  2278. +/* Debug flow control and overview of data flow */
  2279. +#define DFLOW(x)
  2280. +#define DBAUD(x)
  2281. +#define DLOG_INT_TRIG(x)
  2282. +
  2283. +//#define DEBUG_LOG_INCLUDED
  2284. +#ifndef DEBUG_LOG_INCLUDED
  2285. +#define DEBUG_LOG(line, string, value)
  2286. +#else
  2287. +struct debug_log_info
  2288. +{
  2289. + unsigned long time;
  2290. + unsigned long timer_data;
  2291. +// int line;
  2292. + const char *string;
  2293. + int value;
  2294. +};
  2295. +#define DEBUG_LOG_SIZE 4096
  2296. +
  2297. +struct debug_log_info debug_log[DEBUG_LOG_SIZE];
  2298. +int debug_log_pos = 0;
  2299. +
  2300. +#define DEBUG_LOG(_line, _string, _value) do { \
  2301. + if ((_line) == SERIAL_DEBUG_LINE) {\
  2302. + debug_log_func(_line, _string, _value); \
  2303. + }\
  2304. +}while(0)
  2305. +
  2306. +void debug_log_func(int line, const char *string, int value)
  2307. +{
  2308. + if (debug_log_pos < DEBUG_LOG_SIZE) {
  2309. + debug_log[debug_log_pos].time = jiffies;
  2310. + debug_log[debug_log_pos].timer_data = *R_TIMER_DATA;
  2311. +// debug_log[debug_log_pos].line = line;
  2312. + debug_log[debug_log_pos].string = string;
  2313. + debug_log[debug_log_pos].value = value;
  2314. + debug_log_pos++;
  2315. + }
  2316. + /*printk(string, value);*/
  2317. +}
  2318. +#endif
  2319. +
  2320. +#ifndef CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS
  2321. +/* Default number of timer ticks before flushing rx fifo
  2322. + * When using "little data, low latency" applications: use 0
  2323. + * When using "much data" applications (PPP): use ~5
  2324. + */
  2325. +#define CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS 5
  2326. +#endif
  2327. +
  2328. +unsigned long timer_data_to_ns(unsigned long timer_data);
  2329. +
  2330. +static void change_speed(struct e100_serial *info);
  2331. +static void rs_throttle(struct tty_struct * tty);
  2332. +static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
  2333. +static int rs_write(struct tty_struct *tty,
  2334. + const unsigned char *buf, int count);
  2335. +#ifdef CONFIG_ETRAX_RS485
  2336. +static int e100_write_rs485(struct tty_struct *tty,
  2337. + const unsigned char *buf, int count);
  2338. +#endif
  2339. +static int get_lsr_info(struct e100_serial *info, unsigned int *value);
  2340. +
  2341. +
  2342. +#define DEF_BAUD 115200 /* 115.2 kbit/s */
  2343. +#define STD_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
  2344. +#define DEF_RX 0x20 /* or SERIAL_CTRL_W >> 8 */
  2345. +/* Default value of tx_ctrl register: has txd(bit 7)=1 (idle) as default */
  2346. +#define DEF_TX 0x80 /* or SERIAL_CTRL_B */
  2347. +
  2348. +/* offsets from R_SERIALx_CTRL */
  2349. +
  2350. +#define REG_DATA 0
  2351. +#define REG_DATA_STATUS32 0 /* this is the 32 bit register R_SERIALx_READ */
  2352. +#define REG_TR_DATA 0
  2353. +#define REG_STATUS 1
  2354. +#define REG_TR_CTRL 1
  2355. +#define REG_REC_CTRL 2
  2356. +#define REG_BAUD 3
  2357. +#define REG_XOFF 4 /* this is a 32 bit register */
  2358. +
  2359. +/* The bitfields are the same for all serial ports */
  2360. +#define SER_RXD_MASK IO_MASK(R_SERIAL0_STATUS, rxd)
  2361. +#define SER_DATA_AVAIL_MASK IO_MASK(R_SERIAL0_STATUS, data_avail)
  2362. +#define SER_FRAMING_ERR_MASK IO_MASK(R_SERIAL0_STATUS, framing_err)
  2363. +#define SER_PAR_ERR_MASK IO_MASK(R_SERIAL0_STATUS, par_err)
  2364. +#define SER_OVERRUN_MASK IO_MASK(R_SERIAL0_STATUS, overrun)
  2365. +
  2366. +#define SER_ERROR_MASK (SER_OVERRUN_MASK | SER_PAR_ERR_MASK | SER_FRAMING_ERR_MASK)
  2367. +
  2368. +/* Values for info->errorcode */
  2369. +#define ERRCODE_SET_BREAK (TTY_BREAK)
  2370. +#define ERRCODE_INSERT 0x100
  2371. +#define ERRCODE_INSERT_BREAK (ERRCODE_INSERT | TTY_BREAK)
  2372. +
  2373. +#define FORCE_EOP(info) *R_SET_EOP = 1U << info->iseteop;
  2374. +
  2375. +/*
  2376. + * General note regarding the use of IO_* macros in this file:
  2377. + *
  2378. + * We will use the bits defined for DMA channel 6 when using various
  2379. + * IO_* macros (e.g. IO_STATE, IO_MASK, IO_EXTRACT) and _assume_ they are
  2380. + * the same for all channels (which of course they are).
  2381. + *
  2382. + * We will also use the bits defined for serial port 0 when writing commands
  2383. + * to the different ports, as these bits too are the same for all ports.
  2384. + */
  2385. +
  2386. +
  2387. +/* Mask for the irqs possibly enabled in R_IRQ_MASK1_RD etc. */
  2388. +static const unsigned long e100_ser_int_mask = 0
  2389. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  2390. +| IO_MASK(R_IRQ_MASK1_RD, ser0_data) | IO_MASK(R_IRQ_MASK1_RD, ser0_ready)
  2391. +#endif
  2392. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  2393. +| IO_MASK(R_IRQ_MASK1_RD, ser1_data) | IO_MASK(R_IRQ_MASK1_RD, ser1_ready)
  2394. +#endif
  2395. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  2396. +| IO_MASK(R_IRQ_MASK1_RD, ser2_data) | IO_MASK(R_IRQ_MASK1_RD, ser2_ready)
  2397. +#endif
  2398. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  2399. +| IO_MASK(R_IRQ_MASK1_RD, ser3_data) | IO_MASK(R_IRQ_MASK1_RD, ser3_ready)
  2400. +#endif
  2401. +;
  2402. +unsigned long r_alt_ser_baudrate_shadow = 0;
  2403. +
  2404. +/* this is the data for the four serial ports in the etrax100 */
  2405. +/* DMA2(ser2), DMA4(ser3), DMA6(ser0) or DMA8(ser1) */
  2406. +/* R_DMA_CHx_CLR_INTR, R_DMA_CHx_FIRST, R_DMA_CHx_CMD */
  2407. +
  2408. +static struct e100_serial rs_table[] = {
  2409. + { .baud = DEF_BAUD,
  2410. + .ioport = (unsigned char *)R_SERIAL0_CTRL,
  2411. + .irq = 1U << 12, /* uses DMA 6 and 7 */
  2412. + .oclrintradr = R_DMA_CH6_CLR_INTR,
  2413. + .ofirstadr = R_DMA_CH6_FIRST,
  2414. + .ocmdadr = R_DMA_CH6_CMD,
  2415. + .ostatusadr = R_DMA_CH6_STATUS,
  2416. + .iclrintradr = R_DMA_CH7_CLR_INTR,
  2417. + .ifirstadr = R_DMA_CH7_FIRST,
  2418. + .icmdadr = R_DMA_CH7_CMD,
  2419. + .idescradr = R_DMA_CH7_DESCR,
  2420. + .flags = STD_FLAGS,
  2421. + .rx_ctrl = DEF_RX,
  2422. + .tx_ctrl = DEF_TX,
  2423. + .iseteop = 2,
  2424. + .dma_owner = dma_ser0,
  2425. + .io_if = if_serial_0,
  2426. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  2427. + .enabled = 1,
  2428. +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
  2429. + .dma_out_enabled = 1,
  2430. + .dma_out_nbr = SER0_TX_DMA_NBR,
  2431. + .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
  2432. + .dma_out_irq_flags = IRQF_DISABLED,
  2433. + .dma_out_irq_description = "serial 0 dma tr",
  2434. +#else
  2435. + .dma_out_enabled = 0,
  2436. + .dma_out_nbr = UINT_MAX,
  2437. + .dma_out_irq_nbr = 0,
  2438. + .dma_out_irq_flags = 0,
  2439. + .dma_out_irq_description = NULL,
  2440. +#endif
  2441. +#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
  2442. + .dma_in_enabled = 1,
  2443. + .dma_in_nbr = SER0_RX_DMA_NBR,
  2444. + .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
  2445. + .dma_in_irq_flags = IRQF_DISABLED,
  2446. + .dma_in_irq_description = "serial 0 dma rec",
  2447. +#else
  2448. + .dma_in_enabled = 0,
  2449. + .dma_in_nbr = UINT_MAX,
  2450. + .dma_in_irq_nbr = 0,
  2451. + .dma_in_irq_flags = 0,
  2452. + .dma_in_irq_description = NULL,
  2453. +#endif
  2454. +#else
  2455. + .enabled = 0,
  2456. + .io_if_description = NULL,
  2457. + .dma_out_enabled = 0,
  2458. + .dma_in_enabled = 0
  2459. +#endif
  2460. +
  2461. +}, /* ttyS0 */
  2462. +#ifndef CONFIG_SVINTO_SIM
  2463. + { .baud = DEF_BAUD,
  2464. + .ioport = (unsigned char *)R_SERIAL1_CTRL,
  2465. + .irq = 1U << 16, /* uses DMA 8 and 9 */
  2466. + .oclrintradr = R_DMA_CH8_CLR_INTR,
  2467. + .ofirstadr = R_DMA_CH8_FIRST,
  2468. + .ocmdadr = R_DMA_CH8_CMD,
  2469. + .ostatusadr = R_DMA_CH8_STATUS,
  2470. + .iclrintradr = R_DMA_CH9_CLR_INTR,
  2471. + .ifirstadr = R_DMA_CH9_FIRST,
  2472. + .icmdadr = R_DMA_CH9_CMD,
  2473. + .idescradr = R_DMA_CH9_DESCR,
  2474. + .flags = STD_FLAGS,
  2475. + .rx_ctrl = DEF_RX,
  2476. + .tx_ctrl = DEF_TX,
  2477. + .iseteop = 3,
  2478. + .dma_owner = dma_ser1,
  2479. + .io_if = if_serial_1,
  2480. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  2481. + .enabled = 1,
  2482. + .io_if_description = "ser1",
  2483. +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
  2484. + .dma_out_enabled = 1,
  2485. + .dma_out_nbr = SER1_TX_DMA_NBR,
  2486. + .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
  2487. + .dma_out_irq_flags = IRQF_DISABLED,
  2488. + .dma_out_irq_description = "serial 1 dma tr",
  2489. +#else
  2490. + .dma_out_enabled = 0,
  2491. + .dma_out_nbr = UINT_MAX,
  2492. + .dma_out_irq_nbr = 0,
  2493. + .dma_out_irq_flags = 0,
  2494. + .dma_out_irq_description = NULL,
  2495. +#endif
  2496. +#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
  2497. + .dma_in_enabled = 1,
  2498. + .dma_in_nbr = SER1_RX_DMA_NBR,
  2499. + .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
  2500. + .dma_in_irq_flags = IRQF_DISABLED,
  2501. + .dma_in_irq_description = "serial 1 dma rec",
  2502. +#else
  2503. + .dma_in_enabled = 0,
  2504. + .dma_in_enabled = 0,
  2505. + .dma_in_nbr = UINT_MAX,
  2506. + .dma_in_irq_nbr = 0,
  2507. + .dma_in_irq_flags = 0,
  2508. + .dma_in_irq_description = NULL,
  2509. +#endif
  2510. +#else
  2511. + .enabled = 0,
  2512. + .io_if_description = NULL,
  2513. + .dma_in_irq_nbr = 0,
  2514. + .dma_out_enabled = 0,
  2515. + .dma_in_enabled = 0
  2516. +#endif
  2517. +}, /* ttyS1 */
  2518. +
  2519. + { .baud = DEF_BAUD,
  2520. + .ioport = (unsigned char *)R_SERIAL2_CTRL,
  2521. + .irq = 1U << 4, /* uses DMA 2 and 3 */
  2522. + .oclrintradr = R_DMA_CH2_CLR_INTR,
  2523. + .ofirstadr = R_DMA_CH2_FIRST,
  2524. + .ocmdadr = R_DMA_CH2_CMD,
  2525. + .ostatusadr = R_DMA_CH2_STATUS,
  2526. + .iclrintradr = R_DMA_CH3_CLR_INTR,
  2527. + .ifirstadr = R_DMA_CH3_FIRST,
  2528. + .icmdadr = R_DMA_CH3_CMD,
  2529. + .idescradr = R_DMA_CH3_DESCR,
  2530. + .flags = STD_FLAGS,
  2531. + .rx_ctrl = DEF_RX,
  2532. + .tx_ctrl = DEF_TX,
  2533. + .iseteop = 0,
  2534. + .dma_owner = dma_ser2,
  2535. + .io_if = if_serial_2,
  2536. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  2537. + .enabled = 1,
  2538. + .io_if_description = "ser2",
  2539. +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
  2540. + .dma_out_enabled = 1,
  2541. + .dma_out_nbr = SER2_TX_DMA_NBR,
  2542. + .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
  2543. + .dma_out_irq_flags = IRQF_DISABLED,
  2544. + .dma_out_irq_description = "serial 2 dma tr",
  2545. +#else
  2546. + .dma_out_enabled = 0,
  2547. + .dma_out_nbr = UINT_MAX,
  2548. + .dma_out_irq_nbr = 0,
  2549. + .dma_out_irq_flags = 0,
  2550. + .dma_out_irq_description = NULL,
  2551. +#endif
  2552. +#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
  2553. + .dma_in_enabled = 1,
  2554. + .dma_in_nbr = SER2_RX_DMA_NBR,
  2555. + .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
  2556. + .dma_in_irq_flags = IRQF_DISABLED,
  2557. + .dma_in_irq_description = "serial 2 dma rec",
  2558. +#else
  2559. + .dma_in_enabled = 0,
  2560. + .dma_in_nbr = UINT_MAX,
  2561. + .dma_in_irq_nbr = 0,
  2562. + .dma_in_irq_flags = 0,
  2563. + .dma_in_irq_description = NULL,
  2564. +#endif
  2565. +#else
  2566. + .enabled = 0,
  2567. + .io_if_description = NULL,
  2568. + .dma_out_enabled = 0,
  2569. + .dma_in_enabled = 0
  2570. +#endif
  2571. + }, /* ttyS2 */
  2572. +
  2573. + { .baud = DEF_BAUD,
  2574. + .ioport = (unsigned char *)R_SERIAL3_CTRL,
  2575. + .irq = 1U << 8, /* uses DMA 4 and 5 */
  2576. + .oclrintradr = R_DMA_CH4_CLR_INTR,
  2577. + .ofirstadr = R_DMA_CH4_FIRST,
  2578. + .ocmdadr = R_DMA_CH4_CMD,
  2579. + .ostatusadr = R_DMA_CH4_STATUS,
  2580. + .iclrintradr = R_DMA_CH5_CLR_INTR,
  2581. + .ifirstadr = R_DMA_CH5_FIRST,
  2582. + .icmdadr = R_DMA_CH5_CMD,
  2583. + .idescradr = R_DMA_CH5_DESCR,
  2584. + .flags = STD_FLAGS,
  2585. + .rx_ctrl = DEF_RX,
  2586. + .tx_ctrl = DEF_TX,
  2587. + .iseteop = 1,
  2588. + .dma_owner = dma_ser3,
  2589. + .io_if = if_serial_3,
  2590. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  2591. + .enabled = 1,
  2592. + .io_if_description = "ser3",
  2593. +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
  2594. + .dma_out_enabled = 1,
  2595. + .dma_out_nbr = SER3_TX_DMA_NBR,
  2596. + .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
  2597. + .dma_out_irq_flags = IRQF_DISABLED,
  2598. + .dma_out_irq_description = "serial 3 dma tr",
  2599. +#else
  2600. + .dma_out_enabled = 0,
  2601. + .dma_out_nbr = UINT_MAX,
  2602. + .dma_out_irq_nbr = 0,
  2603. + .dma_out_irq_flags = 0,
  2604. + .dma_out_irq_description = NULL,
  2605. +#endif
  2606. +#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
  2607. + .dma_in_enabled = 1,
  2608. + .dma_in_nbr = SER3_RX_DMA_NBR,
  2609. + .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
  2610. + .dma_in_irq_flags = IRQF_DISABLED,
  2611. + .dma_in_irq_description = "serial 3 dma rec",
  2612. +#else
  2613. + .dma_in_enabled = 0,
  2614. + .dma_in_nbr = UINT_MAX,
  2615. + .dma_in_irq_nbr = 0,
  2616. + .dma_in_irq_flags = 0,
  2617. + .dma_in_irq_description = NULL
  2618. +#endif
  2619. +#else
  2620. + .enabled = 0,
  2621. + .io_if_description = NULL,
  2622. + .dma_out_enabled = 0,
  2623. + .dma_in_enabled = 0
  2624. +#endif
  2625. + } /* ttyS3 */
  2626. +#endif
  2627. +};
  2628. +
  2629. +
  2630. +#define NR_PORTS (sizeof(rs_table)/sizeof(struct e100_serial))
  2631. +
  2632. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  2633. +static struct fast_timer fast_timers[NR_PORTS];
  2634. +#endif
  2635. +
  2636. +#ifdef CONFIG_ETRAX_SERIAL_PROC_ENTRY
  2637. +#define PROCSTAT(x) x
  2638. +struct ser_statistics_type {
  2639. + int overrun_cnt;
  2640. + int early_errors_cnt;
  2641. + int ser_ints_ok_cnt;
  2642. + int errors_cnt;
  2643. + unsigned long int processing_flip;
  2644. + unsigned long processing_flip_still_room;
  2645. + unsigned long int timeout_flush_cnt;
  2646. + int rx_dma_ints;
  2647. + int tx_dma_ints;
  2648. + int rx_tot;
  2649. + int tx_tot;
  2650. +};
  2651. +
  2652. +static struct ser_statistics_type ser_stat[NR_PORTS];
  2653. +
  2654. +#else
  2655. +
  2656. +#define PROCSTAT(x)
  2657. +
  2658. +#endif /* CONFIG_ETRAX_SERIAL_PROC_ENTRY */
  2659. +
  2660. +/* RS-485 */
  2661. +#if defined(CONFIG_ETRAX_RS485)
  2662. +#ifdef CONFIG_ETRAX_FAST_TIMER
  2663. +static struct fast_timer fast_timers_rs485[NR_PORTS];
  2664. +#endif
  2665. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  2666. +static int rs485_pa_bit = CONFIG_ETRAX_RS485_ON_PA_BIT;
  2667. +#endif
  2668. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  2669. +static int rs485_port_g_bit = CONFIG_ETRAX_RS485_ON_PORT_G_BIT;
  2670. +#endif
  2671. +#endif
  2672. +
  2673. +/* Info and macros needed for each ports extra control/status signals. */
  2674. +#define E100_STRUCT_PORT(line, pinname) \
  2675. + ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
  2676. + (R_PORT_PA_DATA): ( \
  2677. + (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
  2678. + (R_PORT_PB_DATA):&dummy_ser[line]))
  2679. +
  2680. +#define E100_STRUCT_SHADOW(line, pinname) \
  2681. + ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
  2682. + (&port_pa_data_shadow): ( \
  2683. + (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
  2684. + (&port_pb_data_shadow):&dummy_ser[line]))
  2685. +#define E100_STRUCT_MASK(line, pinname) \
  2686. + ((CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT >= 0)? \
  2687. + (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PA_BIT): ( \
  2688. + (CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT >= 0)? \
  2689. + (1<<CONFIG_ETRAX_SER##line##_##pinname##_ON_PB_BIT):DUMMY_##pinname##_MASK))
  2690. +
  2691. +#define DUMMY_DTR_MASK 1
  2692. +#define DUMMY_RI_MASK 2
  2693. +#define DUMMY_DSR_MASK 4
  2694. +#define DUMMY_CD_MASK 8
  2695. +static unsigned char dummy_ser[NR_PORTS] = {0xFF, 0xFF, 0xFF,0xFF};
  2696. +
  2697. +/* If not all status pins are used or disabled, use mixed mode */
  2698. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  2699. +
  2700. +#define SER0_PA_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PA_BIT+CONFIG_ETRAX_SER0_RI_ON_PA_BIT+CONFIG_ETRAX_SER0_DSR_ON_PA_BIT+CONFIG_ETRAX_SER0_CD_ON_PA_BIT)
  2701. +
  2702. +#if SER0_PA_BITSUM != -4
  2703. +# if CONFIG_ETRAX_SER0_DTR_ON_PA_BIT == -1
  2704. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2705. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2706. +# endif
  2707. +# endif
  2708. +# if CONFIG_ETRAX_SER0_RI_ON_PA_BIT == -1
  2709. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2710. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2711. +# endif
  2712. +# endif
  2713. +# if CONFIG_ETRAX_SER0_DSR_ON_PA_BIT == -1
  2714. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2715. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2716. +# endif
  2717. +# endif
  2718. +# if CONFIG_ETRAX_SER0_CD_ON_PA_BIT == -1
  2719. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2720. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2721. +# endif
  2722. +# endif
  2723. +#endif
  2724. +
  2725. +#define SER0_PB_BITSUM (CONFIG_ETRAX_SER0_DTR_ON_PB_BIT+CONFIG_ETRAX_SER0_RI_ON_PB_BIT+CONFIG_ETRAX_SER0_DSR_ON_PB_BIT+CONFIG_ETRAX_SER0_CD_ON_PB_BIT)
  2726. +
  2727. +#if SER0_PB_BITSUM != -4
  2728. +# if CONFIG_ETRAX_SER0_DTR_ON_PB_BIT == -1
  2729. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2730. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2731. +# endif
  2732. +# endif
  2733. +# if CONFIG_ETRAX_SER0_RI_ON_PB_BIT == -1
  2734. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2735. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2736. +# endif
  2737. +# endif
  2738. +# if CONFIG_ETRAX_SER0_DSR_ON_PB_BIT == -1
  2739. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2740. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2741. +# endif
  2742. +# endif
  2743. +# if CONFIG_ETRAX_SER0_CD_ON_PB_BIT == -1
  2744. +# ifndef CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED
  2745. +# define CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED 1
  2746. +# endif
  2747. +# endif
  2748. +#endif
  2749. +
  2750. +#endif /* PORT0 */
  2751. +
  2752. +
  2753. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  2754. +
  2755. +#define SER1_PA_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PA_BIT+CONFIG_ETRAX_SER1_RI_ON_PA_BIT+CONFIG_ETRAX_SER1_DSR_ON_PA_BIT+CONFIG_ETRAX_SER1_CD_ON_PA_BIT)
  2756. +
  2757. +#if SER1_PA_BITSUM != -4
  2758. +# if CONFIG_ETRAX_SER1_DTR_ON_PA_BIT == -1
  2759. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2760. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2761. +# endif
  2762. +# endif
  2763. +# if CONFIG_ETRAX_SER1_RI_ON_PA_BIT == -1
  2764. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2765. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2766. +# endif
  2767. +# endif
  2768. +# if CONFIG_ETRAX_SER1_DSR_ON_PA_BIT == -1
  2769. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2770. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2771. +# endif
  2772. +# endif
  2773. +# if CONFIG_ETRAX_SER1_CD_ON_PA_BIT == -1
  2774. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2775. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2776. +# endif
  2777. +# endif
  2778. +#endif
  2779. +
  2780. +#define SER1_PB_BITSUM (CONFIG_ETRAX_SER1_DTR_ON_PB_BIT+CONFIG_ETRAX_SER1_RI_ON_PB_BIT+CONFIG_ETRAX_SER1_DSR_ON_PB_BIT+CONFIG_ETRAX_SER1_CD_ON_PB_BIT)
  2781. +
  2782. +#if SER1_PB_BITSUM != -4
  2783. +# if CONFIG_ETRAX_SER1_DTR_ON_PB_BIT == -1
  2784. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2785. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2786. +# endif
  2787. +# endif
  2788. +# if CONFIG_ETRAX_SER1_RI_ON_PB_BIT == -1
  2789. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2790. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2791. +# endif
  2792. +# endif
  2793. +# if CONFIG_ETRAX_SER1_DSR_ON_PB_BIT == -1
  2794. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2795. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2796. +# endif
  2797. +# endif
  2798. +# if CONFIG_ETRAX_SER1_CD_ON_PB_BIT == -1
  2799. +# ifndef CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED
  2800. +# define CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED 1
  2801. +# endif
  2802. +# endif
  2803. +#endif
  2804. +
  2805. +#endif /* PORT1 */
  2806. +
  2807. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  2808. +
  2809. +#define SER2_PA_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PA_BIT+CONFIG_ETRAX_SER2_RI_ON_PA_BIT+CONFIG_ETRAX_SER2_DSR_ON_PA_BIT+CONFIG_ETRAX_SER2_CD_ON_PA_BIT)
  2810. +
  2811. +#if SER2_PA_BITSUM != -4
  2812. +# if CONFIG_ETRAX_SER2_DTR_ON_PA_BIT == -1
  2813. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2814. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2815. +# endif
  2816. +# endif
  2817. +# if CONFIG_ETRAX_SER2_RI_ON_PA_BIT == -1
  2818. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2819. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2820. +# endif
  2821. +# endif
  2822. +# if CONFIG_ETRAX_SER2_DSR_ON_PA_BIT == -1
  2823. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2824. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2825. +# endif
  2826. +# endif
  2827. +# if CONFIG_ETRAX_SER2_CD_ON_PA_BIT == -1
  2828. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2829. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2830. +# endif
  2831. +# endif
  2832. +#endif
  2833. +
  2834. +#define SER2_PB_BITSUM (CONFIG_ETRAX_SER2_DTR_ON_PB_BIT+CONFIG_ETRAX_SER2_RI_ON_PB_BIT+CONFIG_ETRAX_SER2_DSR_ON_PB_BIT+CONFIG_ETRAX_SER2_CD_ON_PB_BIT)
  2835. +
  2836. +#if SER2_PB_BITSUM != -4
  2837. +# if CONFIG_ETRAX_SER2_DTR_ON_PB_BIT == -1
  2838. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2839. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2840. +# endif
  2841. +# endif
  2842. +# if CONFIG_ETRAX_SER2_RI_ON_PB_BIT == -1
  2843. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2844. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2845. +# endif
  2846. +# endif
  2847. +# if CONFIG_ETRAX_SER2_DSR_ON_PB_BIT == -1
  2848. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2849. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2850. +# endif
  2851. +# endif
  2852. +# if CONFIG_ETRAX_SER2_CD_ON_PB_BIT == -1
  2853. +# ifndef CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED
  2854. +# define CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED 1
  2855. +# endif
  2856. +# endif
  2857. +#endif
  2858. +
  2859. +#endif /* PORT2 */
  2860. +
  2861. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  2862. +
  2863. +#define SER3_PA_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PA_BIT+CONFIG_ETRAX_SER3_RI_ON_PA_BIT+CONFIG_ETRAX_SER3_DSR_ON_PA_BIT+CONFIG_ETRAX_SER3_CD_ON_PA_BIT)
  2864. +
  2865. +#if SER3_PA_BITSUM != -4
  2866. +# if CONFIG_ETRAX_SER3_DTR_ON_PA_BIT == -1
  2867. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2868. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2869. +# endif
  2870. +# endif
  2871. +# if CONFIG_ETRAX_SER3_RI_ON_PA_BIT == -1
  2872. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2873. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2874. +# endif
  2875. +# endif
  2876. +# if CONFIG_ETRAX_SER3_DSR_ON_PA_BIT == -1
  2877. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2878. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2879. +# endif
  2880. +# endif
  2881. +# if CONFIG_ETRAX_SER3_CD_ON_PA_BIT == -1
  2882. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2883. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2884. +# endif
  2885. +# endif
  2886. +#endif
  2887. +
  2888. +#define SER3_PB_BITSUM (CONFIG_ETRAX_SER3_DTR_ON_PB_BIT+CONFIG_ETRAX_SER3_RI_ON_PB_BIT+CONFIG_ETRAX_SER3_DSR_ON_PB_BIT+CONFIG_ETRAX_SER3_CD_ON_PB_BIT)
  2889. +
  2890. +#if SER3_PB_BITSUM != -4
  2891. +# if CONFIG_ETRAX_SER3_DTR_ON_PB_BIT == -1
  2892. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2893. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2894. +# endif
  2895. +# endif
  2896. +# if CONFIG_ETRAX_SER3_RI_ON_PB_BIT == -1
  2897. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2898. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2899. +# endif
  2900. +# endif
  2901. +# if CONFIG_ETRAX_SER3_DSR_ON_PB_BIT == -1
  2902. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2903. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2904. +# endif
  2905. +# endif
  2906. +# if CONFIG_ETRAX_SER3_CD_ON_PB_BIT == -1
  2907. +# ifndef CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED
  2908. +# define CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED 1
  2909. +# endif
  2910. +# endif
  2911. +#endif
  2912. +
  2913. +#endif /* PORT3 */
  2914. +
  2915. +
  2916. +#if defined(CONFIG_ETRAX_SER0_DTR_RI_DSR_CD_MIXED) || \
  2917. + defined(CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED) || \
  2918. + defined(CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED) || \
  2919. + defined(CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED)
  2920. +#define CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
  2921. +#endif
  2922. +
  2923. +#ifdef CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
  2924. +/* The pins can be mixed on PA and PB */
  2925. +#define CONTROL_PINS_PORT_NOT_USED(line) \
  2926. + &dummy_ser[line], &dummy_ser[line], \
  2927. + &dummy_ser[line], &dummy_ser[line], \
  2928. + &dummy_ser[line], &dummy_ser[line], \
  2929. + &dummy_ser[line], &dummy_ser[line], \
  2930. + DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
  2931. +
  2932. +
  2933. +struct control_pins
  2934. +{
  2935. + volatile unsigned char *dtr_port;
  2936. + unsigned char *dtr_shadow;
  2937. + volatile unsigned char *ri_port;
  2938. + unsigned char *ri_shadow;
  2939. + volatile unsigned char *dsr_port;
  2940. + unsigned char *dsr_shadow;
  2941. + volatile unsigned char *cd_port;
  2942. + unsigned char *cd_shadow;
  2943. +
  2944. + unsigned char dtr_mask;
  2945. + unsigned char ri_mask;
  2946. + unsigned char dsr_mask;
  2947. + unsigned char cd_mask;
  2948. +};
  2949. +
  2950. +static const struct control_pins e100_modem_pins[NR_PORTS] =
  2951. +{
  2952. + /* Ser 0 */
  2953. + {
  2954. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  2955. + E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
  2956. + E100_STRUCT_PORT(0,RI), E100_STRUCT_SHADOW(0,RI),
  2957. + E100_STRUCT_PORT(0,DSR), E100_STRUCT_SHADOW(0,DSR),
  2958. + E100_STRUCT_PORT(0,CD), E100_STRUCT_SHADOW(0,CD),
  2959. + E100_STRUCT_MASK(0,DTR),
  2960. + E100_STRUCT_MASK(0,RI),
  2961. + E100_STRUCT_MASK(0,DSR),
  2962. + E100_STRUCT_MASK(0,CD)
  2963. +#else
  2964. + CONTROL_PINS_PORT_NOT_USED(0)
  2965. +#endif
  2966. + },
  2967. +
  2968. + /* Ser 1 */
  2969. + {
  2970. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  2971. + E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
  2972. + E100_STRUCT_PORT(1,RI), E100_STRUCT_SHADOW(1,RI),
  2973. + E100_STRUCT_PORT(1,DSR), E100_STRUCT_SHADOW(1,DSR),
  2974. + E100_STRUCT_PORT(1,CD), E100_STRUCT_SHADOW(1,CD),
  2975. + E100_STRUCT_MASK(1,DTR),
  2976. + E100_STRUCT_MASK(1,RI),
  2977. + E100_STRUCT_MASK(1,DSR),
  2978. + E100_STRUCT_MASK(1,CD)
  2979. +#else
  2980. + CONTROL_PINS_PORT_NOT_USED(1)
  2981. +#endif
  2982. + },
  2983. +
  2984. + /* Ser 2 */
  2985. + {
  2986. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  2987. + E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
  2988. + E100_STRUCT_PORT(2,RI), E100_STRUCT_SHADOW(2,RI),
  2989. + E100_STRUCT_PORT(2,DSR), E100_STRUCT_SHADOW(2,DSR),
  2990. + E100_STRUCT_PORT(2,CD), E100_STRUCT_SHADOW(2,CD),
  2991. + E100_STRUCT_MASK(2,DTR),
  2992. + E100_STRUCT_MASK(2,RI),
  2993. + E100_STRUCT_MASK(2,DSR),
  2994. + E100_STRUCT_MASK(2,CD)
  2995. +#else
  2996. + CONTROL_PINS_PORT_NOT_USED(2)
  2997. +#endif
  2998. + },
  2999. +
  3000. + /* Ser 3 */
  3001. + {
  3002. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  3003. + E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
  3004. + E100_STRUCT_PORT(3,RI), E100_STRUCT_SHADOW(3,RI),
  3005. + E100_STRUCT_PORT(3,DSR), E100_STRUCT_SHADOW(3,DSR),
  3006. + E100_STRUCT_PORT(3,CD), E100_STRUCT_SHADOW(3,CD),
  3007. + E100_STRUCT_MASK(3,DTR),
  3008. + E100_STRUCT_MASK(3,RI),
  3009. + E100_STRUCT_MASK(3,DSR),
  3010. + E100_STRUCT_MASK(3,CD)
  3011. +#else
  3012. + CONTROL_PINS_PORT_NOT_USED(3)
  3013. +#endif
  3014. + }
  3015. +};
  3016. +#else /* CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
  3017. +
  3018. +/* All pins are on either PA or PB for each serial port */
  3019. +#define CONTROL_PINS_PORT_NOT_USED(line) \
  3020. + &dummy_ser[line], &dummy_ser[line], \
  3021. + DUMMY_DTR_MASK, DUMMY_RI_MASK, DUMMY_DSR_MASK, DUMMY_CD_MASK
  3022. +
  3023. +
  3024. +struct control_pins
  3025. +{
  3026. + volatile unsigned char *port;
  3027. + unsigned char *shadow;
  3028. +
  3029. + unsigned char dtr_mask;
  3030. + unsigned char ri_mask;
  3031. + unsigned char dsr_mask;
  3032. + unsigned char cd_mask;
  3033. +};
  3034. +
  3035. +#define dtr_port port
  3036. +#define dtr_shadow shadow
  3037. +#define ri_port port
  3038. +#define ri_shadow shadow
  3039. +#define dsr_port port
  3040. +#define dsr_shadow shadow
  3041. +#define cd_port port
  3042. +#define cd_shadow shadow
  3043. +
  3044. +static const struct control_pins e100_modem_pins[NR_PORTS] =
  3045. +{
  3046. + /* Ser 0 */
  3047. + {
  3048. +#ifdef CONFIG_ETRAX_SERIAL_PORT0
  3049. + E100_STRUCT_PORT(0,DTR), E100_STRUCT_SHADOW(0,DTR),
  3050. + E100_STRUCT_MASK(0,DTR),
  3051. + E100_STRUCT_MASK(0,RI),
  3052. + E100_STRUCT_MASK(0,DSR),
  3053. + E100_STRUCT_MASK(0,CD)
  3054. +#else
  3055. + CONTROL_PINS_PORT_NOT_USED(0)
  3056. +#endif
  3057. + },
  3058. +
  3059. + /* Ser 1 */
  3060. + {
  3061. +#ifdef CONFIG_ETRAX_SERIAL_PORT1
  3062. + E100_STRUCT_PORT(1,DTR), E100_STRUCT_SHADOW(1,DTR),
  3063. + E100_STRUCT_MASK(1,DTR),
  3064. + E100_STRUCT_MASK(1,RI),
  3065. + E100_STRUCT_MASK(1,DSR),
  3066. + E100_STRUCT_MASK(1,CD)
  3067. +#else
  3068. + CONTROL_PINS_PORT_NOT_USED(1)
  3069. +#endif
  3070. + },
  3071. +
  3072. + /* Ser 2 */
  3073. + {
  3074. +#ifdef CONFIG_ETRAX_SERIAL_PORT2
  3075. + E100_STRUCT_PORT(2,DTR), E100_STRUCT_SHADOW(2,DTR),
  3076. + E100_STRUCT_MASK(2,DTR),
  3077. + E100_STRUCT_MASK(2,RI),
  3078. + E100_STRUCT_MASK(2,DSR),
  3079. + E100_STRUCT_MASK(2,CD)
  3080. +#else
  3081. + CONTROL_PINS_PORT_NOT_USED(2)
  3082. +#endif
  3083. + },
  3084. +
  3085. + /* Ser 3 */
  3086. + {
  3087. +#ifdef CONFIG_ETRAX_SERIAL_PORT3
  3088. + E100_STRUCT_PORT(3,DTR), E100_STRUCT_SHADOW(3,DTR),
  3089. + E100_STRUCT_MASK(3,DTR),
  3090. + E100_STRUCT_MASK(3,RI),
  3091. + E100_STRUCT_MASK(3,DSR),
  3092. + E100_STRUCT_MASK(3,CD)
  3093. +#else
  3094. + CONTROL_PINS_PORT_NOT_USED(3)
  3095. +#endif
  3096. + }
  3097. +};
  3098. +#endif /* !CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
  3099. +
  3100. +#define E100_RTS_MASK 0x20
  3101. +#define E100_CTS_MASK 0x40
  3102. +
  3103. +/* All serial port signals are active low:
  3104. + * active = 0 -> 3.3V to RS-232 driver -> -12V on RS-232 level
  3105. + * inactive = 1 -> 0V to RS-232 driver -> +12V on RS-232 level
  3106. + *
  3107. + * These macros returns the pin value: 0=0V, >=1 = 3.3V on ETRAX chip
  3108. + */
  3109. +
  3110. +/* Output */
  3111. +#define E100_RTS_GET(info) ((info)->rx_ctrl & E100_RTS_MASK)
  3112. +/* Input */
  3113. +#define E100_CTS_GET(info) ((info)->ioport[REG_STATUS] & E100_CTS_MASK)
  3114. +
  3115. +/* These are typically PA or PB and 0 means 0V, 1 means 3.3V */
  3116. +/* Is an output */
  3117. +#define E100_DTR_GET(info) ((*e100_modem_pins[(info)->line].dtr_shadow) & e100_modem_pins[(info)->line].dtr_mask)
  3118. +
  3119. +/* Normally inputs */
  3120. +#define E100_RI_GET(info) ((*e100_modem_pins[(info)->line].ri_port) & e100_modem_pins[(info)->line].ri_mask)
  3121. +#define E100_CD_GET(info) ((*e100_modem_pins[(info)->line].cd_port) & e100_modem_pins[(info)->line].cd_mask)
  3122. +
  3123. +/* Input */
  3124. +#define E100_DSR_GET(info) ((*e100_modem_pins[(info)->line].dsr_port) & e100_modem_pins[(info)->line].dsr_mask)
  3125. +
  3126. +
  3127. +/*
  3128. + * tmp_buf is used as a temporary buffer by serial_write. We need to
  3129. + * lock it in case the memcpy_fromfs blocks while swapping in a page,
  3130. + * and some other program tries to do a serial write at the same time.
  3131. + * Since the lock will only come under contention when the system is
  3132. + * swapping and available memory is low, it makes sense to share one
  3133. + * buffer across all the serial ports, since it significantly saves
  3134. + * memory if large numbers of serial ports are open.
  3135. + */
  3136. +static unsigned char *tmp_buf;
  3137. +static DEFINE_MUTEX(tmp_buf_mutex);
  3138. +
  3139. +/* Calculate the chartime depending on baudrate, numbor of bits etc. */
  3140. +static void update_char_time(struct e100_serial * info)
  3141. +{
  3142. + tcflag_t cflags = info->port.tty->termios->c_cflag;
  3143. + int bits;
  3144. +
  3145. + /* calc. number of bits / data byte */
  3146. + /* databits + startbit and 1 stopbit */
  3147. + if ((cflags & CSIZE) == CS7)
  3148. + bits = 9;
  3149. + else
  3150. + bits = 10;
  3151. +
  3152. + if (cflags & CSTOPB) /* 2 stopbits ? */
  3153. + bits++;
  3154. +
  3155. + if (cflags & PARENB) /* parity bit ? */
  3156. + bits++;
  3157. +
  3158. + /* calc timeout */
  3159. + info->char_time_usec = ((bits * 1000000) / info->baud) + 1;
  3160. + info->flush_time_usec = 4*info->char_time_usec;
  3161. + if (info->flush_time_usec < MIN_FLUSH_TIME_USEC)
  3162. + info->flush_time_usec = MIN_FLUSH_TIME_USEC;
  3163. +
  3164. +}
  3165. +
  3166. +/*
  3167. + * This function maps from the Bxxxx defines in asm/termbits.h into real
  3168. + * baud rates.
  3169. + */
  3170. +
  3171. +static int
  3172. +cflag_to_baud(unsigned int cflag)
  3173. +{
  3174. + static int baud_table[] = {
  3175. + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
  3176. + 4800, 9600, 19200, 38400 };
  3177. +
  3178. + static int ext_baud_table[] = {
  3179. + 0, 57600, 115200, 230400, 460800, 921600, 1843200, 6250000,
  3180. + 0, 0, 0, 0, 0, 0, 0, 0 };
  3181. +
  3182. + if (cflag & CBAUDEX)
  3183. + return ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
  3184. + else
  3185. + return baud_table[cflag & CBAUD];
  3186. +}
  3187. +
  3188. +/* and this maps to an etrax100 hardware baud constant */
  3189. +
  3190. +static unsigned char
  3191. +cflag_to_etrax_baud(unsigned int cflag)
  3192. +{
  3193. + char retval;
  3194. +
  3195. + static char baud_table[] = {
  3196. + -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, 4, 5, 6, 7 };
  3197. +
  3198. + static char ext_baud_table[] = {
  3199. + -1, 8, 9, 10, 11, 12, 13, 14, -1, -1, -1, -1, -1, -1, -1, -1 };
  3200. +
  3201. + if (cflag & CBAUDEX)
  3202. + retval = ext_baud_table[(cflag & CBAUD) & ~CBAUDEX];
  3203. + else
  3204. + retval = baud_table[cflag & CBAUD];
  3205. +
  3206. + if (retval < 0) {
  3207. + printk(KERN_WARNING "serdriver tried setting invalid baud rate, flags %x.\n", cflag);
  3208. + retval = 5; /* choose default 9600 instead */
  3209. + }
  3210. +
  3211. + return retval | (retval << 4); /* choose same for both TX and RX */
  3212. +}
  3213. +
  3214. +
  3215. +/* Various static support functions */
  3216. +
  3217. +/* Functions to set or clear DTR/RTS on the requested line */
  3218. +/* It is complicated by the fact that RTS is a serial port register, while
  3219. + * DTR might not be implemented in the HW at all, and if it is, it can be on
  3220. + * any general port.
  3221. + */
  3222. +
  3223. +
  3224. +static inline void
  3225. +e100_dtr(struct e100_serial *info, int set)
  3226. +{
  3227. +#ifndef CONFIG_SVINTO_SIM
  3228. + unsigned char mask = e100_modem_pins[info->line].dtr_mask;
  3229. +
  3230. +#ifdef SERIAL_DEBUG_IO
  3231. + printk("ser%i dtr %i mask: 0x%02X\n", info->line, set, mask);
  3232. + printk("ser%i shadow before 0x%02X get: %i\n",
  3233. + info->line, *e100_modem_pins[info->line].dtr_shadow,
  3234. + E100_DTR_GET(info));
  3235. +#endif
  3236. + /* DTR is active low */
  3237. + {
  3238. + unsigned long flags;
  3239. +
  3240. + local_irq_save(flags);
  3241. + *e100_modem_pins[info->line].dtr_shadow &= ~mask;
  3242. + *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
  3243. + *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
  3244. + local_irq_restore(flags);
  3245. + }
  3246. +
  3247. +#ifdef SERIAL_DEBUG_IO
  3248. + printk("ser%i shadow after 0x%02X get: %i\n",
  3249. + info->line, *e100_modem_pins[info->line].dtr_shadow,
  3250. + E100_DTR_GET(info));
  3251. +#endif
  3252. +#endif
  3253. +}
  3254. +
  3255. +/* set = 0 means 3.3V on the pin, bitvalue: 0=active, 1=inactive
  3256. + * 0=0V , 1=3.3V
  3257. + */
  3258. +static inline void
  3259. +e100_rts(struct e100_serial *info, int set)
  3260. +{
  3261. +#ifndef CONFIG_SVINTO_SIM
  3262. + unsigned long flags;
  3263. + local_irq_save(flags);
  3264. + info->rx_ctrl &= ~E100_RTS_MASK;
  3265. + info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
  3266. + info->ioport[REG_REC_CTRL] = info->rx_ctrl;
  3267. + local_irq_restore(flags);
  3268. +#ifdef SERIAL_DEBUG_IO
  3269. + printk("ser%i rts %i\n", info->line, set);
  3270. +#endif
  3271. +#endif
  3272. +}
  3273. +
  3274. +
  3275. +/* If this behaves as a modem, RI and CD is an output */
  3276. +static inline void
  3277. +e100_ri_out(struct e100_serial *info, int set)
  3278. +{
  3279. +#ifndef CONFIG_SVINTO_SIM
  3280. + /* RI is active low */
  3281. + {
  3282. + unsigned char mask = e100_modem_pins[info->line].ri_mask;
  3283. + unsigned long flags;
  3284. +
  3285. + local_irq_save(flags);
  3286. + *e100_modem_pins[info->line].ri_shadow &= ~mask;
  3287. + *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
  3288. + *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
  3289. + local_irq_restore(flags);
  3290. + }
  3291. +#endif
  3292. +}
  3293. +static inline void
  3294. +e100_cd_out(struct e100_serial *info, int set)
  3295. +{
  3296. +#ifndef CONFIG_SVINTO_SIM
  3297. + /* CD is active low */
  3298. + {
  3299. + unsigned char mask = e100_modem_pins[info->line].cd_mask;
  3300. + unsigned long flags;
  3301. +
  3302. + local_irq_save(flags);
  3303. + *e100_modem_pins[info->line].cd_shadow &= ~mask;
  3304. + *e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
  3305. + *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
  3306. + local_irq_restore(flags);
  3307. + }
  3308. +#endif
  3309. +}
  3310. +
  3311. +static inline void
  3312. +e100_disable_rx(struct e100_serial *info)
  3313. +{
  3314. +#ifndef CONFIG_SVINTO_SIM
  3315. + /* disable the receiver */
  3316. + info->ioport[REG_REC_CTRL] =
  3317. + (info->rx_ctrl &= ~IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
  3318. +#endif
  3319. +}
  3320. +
  3321. +static inline void
  3322. +e100_enable_rx(struct e100_serial *info)
  3323. +{
  3324. +#ifndef CONFIG_SVINTO_SIM
  3325. + /* enable the receiver */
  3326. + info->ioport[REG_REC_CTRL] =
  3327. + (info->rx_ctrl |= IO_MASK(R_SERIAL0_REC_CTRL, rec_enable));
  3328. +#endif
  3329. +}
  3330. +
  3331. +/* the rx DMA uses both the dma_descr and the dma_eop interrupts */
  3332. +
  3333. +static inline void
  3334. +e100_disable_rxdma_irq(struct e100_serial *info)
  3335. +{
  3336. +#ifdef SERIAL_DEBUG_INTR
  3337. + printk("rxdma_irq(%d): 0\n",info->line);
  3338. +#endif
  3339. + DINTR1(DEBUG_LOG(info->line,"IRQ disable_rxdma_irq %i\n", info->line));
  3340. + *R_IRQ_MASK2_CLR = (info->irq << 2) | (info->irq << 3);
  3341. +}
  3342. +
  3343. +static inline void
  3344. +e100_enable_rxdma_irq(struct e100_serial *info)
  3345. +{
  3346. +#ifdef SERIAL_DEBUG_INTR
  3347. + printk("rxdma_irq(%d): 1\n",info->line);
  3348. +#endif
  3349. + DINTR1(DEBUG_LOG(info->line,"IRQ enable_rxdma_irq %i\n", info->line));
  3350. + *R_IRQ_MASK2_SET = (info->irq << 2) | (info->irq << 3);
  3351. +}
  3352. +
  3353. +/* the tx DMA uses only dma_descr interrupt */
  3354. +
  3355. +static void e100_disable_txdma_irq(struct e100_serial *info)
  3356. +{
  3357. +#ifdef SERIAL_DEBUG_INTR
  3358. + printk("txdma_irq(%d): 0\n",info->line);
  3359. +#endif
  3360. + DINTR1(DEBUG_LOG(info->line,"IRQ disable_txdma_irq %i\n", info->line));
  3361. + *R_IRQ_MASK2_CLR = info->irq;
  3362. +}
  3363. +
  3364. +static void e100_enable_txdma_irq(struct e100_serial *info)
  3365. +{
  3366. +#ifdef SERIAL_DEBUG_INTR
  3367. + printk("txdma_irq(%d): 1\n",info->line);
  3368. +#endif
  3369. + DINTR1(DEBUG_LOG(info->line,"IRQ enable_txdma_irq %i\n", info->line));
  3370. + *R_IRQ_MASK2_SET = info->irq;
  3371. +}
  3372. +
  3373. +static void e100_disable_txdma_channel(struct e100_serial *info)
  3374. +{
  3375. + unsigned long flags;
  3376. +
  3377. + /* Disable output DMA channel for the serial port in question
  3378. + * ( set to something other than serialX)
  3379. + */
  3380. + local_irq_save(flags);
  3381. + DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
  3382. + if (info->line == 0) {
  3383. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
  3384. + IO_STATE(R_GEN_CONFIG, dma6, serial0)) {
  3385. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
  3386. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
  3387. + }
  3388. + } else if (info->line == 1) {
  3389. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma8)) ==
  3390. + IO_STATE(R_GEN_CONFIG, dma8, serial1)) {
  3391. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
  3392. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
  3393. + }
  3394. + } else if (info->line == 2) {
  3395. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma2)) ==
  3396. + IO_STATE(R_GEN_CONFIG, dma2, serial2)) {
  3397. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
  3398. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
  3399. + }
  3400. + } else if (info->line == 3) {
  3401. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma4)) ==
  3402. + IO_STATE(R_GEN_CONFIG, dma4, serial3)) {
  3403. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
  3404. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
  3405. + }
  3406. + }
  3407. + *R_GEN_CONFIG = genconfig_shadow;
  3408. + local_irq_restore(flags);
  3409. +}
  3410. +
  3411. +
  3412. +static void e100_enable_txdma_channel(struct e100_serial *info)
  3413. +{
  3414. + unsigned long flags;
  3415. +
  3416. + local_irq_save(flags);
  3417. + DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
  3418. + /* Enable output DMA channel for the serial port in question */
  3419. + if (info->line == 0) {
  3420. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
  3421. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, serial0);
  3422. + } else if (info->line == 1) {
  3423. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
  3424. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, serial1);
  3425. + } else if (info->line == 2) {
  3426. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
  3427. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, serial2);
  3428. + } else if (info->line == 3) {
  3429. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
  3430. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
  3431. + }
  3432. + *R_GEN_CONFIG = genconfig_shadow;
  3433. + local_irq_restore(flags);
  3434. +}
  3435. +
  3436. +static void e100_disable_rxdma_channel(struct e100_serial *info)
  3437. +{
  3438. + unsigned long flags;
  3439. +
  3440. + /* Disable input DMA channel for the serial port in question
  3441. + * ( set to something other than serialX)
  3442. + */
  3443. + local_irq_save(flags);
  3444. + if (info->line == 0) {
  3445. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
  3446. + IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
  3447. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
  3448. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, unused);
  3449. + }
  3450. + } else if (info->line == 1) {
  3451. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma9)) ==
  3452. + IO_STATE(R_GEN_CONFIG, dma9, serial1)) {
  3453. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
  3454. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, usb);
  3455. + }
  3456. + } else if (info->line == 2) {
  3457. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma3)) ==
  3458. + IO_STATE(R_GEN_CONFIG, dma3, serial2)) {
  3459. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
  3460. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
  3461. + }
  3462. + } else if (info->line == 3) {
  3463. + if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma5)) ==
  3464. + IO_STATE(R_GEN_CONFIG, dma5, serial3)) {
  3465. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
  3466. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
  3467. + }
  3468. + }
  3469. + *R_GEN_CONFIG = genconfig_shadow;
  3470. + local_irq_restore(flags);
  3471. +}
  3472. +
  3473. +
  3474. +static void e100_enable_rxdma_channel(struct e100_serial *info)
  3475. +{
  3476. + unsigned long flags;
  3477. +
  3478. + local_irq_save(flags);
  3479. + /* Enable input DMA channel for the serial port in question */
  3480. + if (info->line == 0) {
  3481. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
  3482. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma7, serial0);
  3483. + } else if (info->line == 1) {
  3484. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma9);
  3485. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma9, serial1);
  3486. + } else if (info->line == 2) {
  3487. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
  3488. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, serial2);
  3489. + } else if (info->line == 3) {
  3490. + genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
  3491. + genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
  3492. + }
  3493. + *R_GEN_CONFIG = genconfig_shadow;
  3494. + local_irq_restore(flags);
  3495. +}
  3496. +
  3497. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  3498. +/* in order to detect and fix errors on the first byte
  3499. + we have to use the serial interrupts as well. */
  3500. +
  3501. +static inline void
  3502. +e100_disable_serial_data_irq(struct e100_serial *info)
  3503. +{
  3504. +#ifdef SERIAL_DEBUG_INTR
  3505. + printk("ser_irq(%d): 0\n",info->line);
  3506. +#endif
  3507. + DINTR1(DEBUG_LOG(info->line,"IRQ disable data_irq %i\n", info->line));
  3508. + *R_IRQ_MASK1_CLR = (1U << (8+2*info->line));
  3509. +}
  3510. +
  3511. +static inline void
  3512. +e100_enable_serial_data_irq(struct e100_serial *info)
  3513. +{
  3514. +#ifdef SERIAL_DEBUG_INTR
  3515. + printk("ser_irq(%d): 1\n",info->line);
  3516. + printk("**** %d = %d\n",
  3517. + (8+2*info->line),
  3518. + (1U << (8+2*info->line)));
  3519. +#endif
  3520. + DINTR1(DEBUG_LOG(info->line,"IRQ enable data_irq %i\n", info->line));
  3521. + *R_IRQ_MASK1_SET = (1U << (8+2*info->line));
  3522. +}
  3523. +#endif
  3524. +
  3525. +static inline void
  3526. +e100_disable_serial_tx_ready_irq(struct e100_serial *info)
  3527. +{
  3528. +#ifdef SERIAL_DEBUG_INTR
  3529. + printk("ser_tx_irq(%d): 0\n",info->line);
  3530. +#endif
  3531. + DINTR1(DEBUG_LOG(info->line,"IRQ disable ready_irq %i\n", info->line));
  3532. + *R_IRQ_MASK1_CLR = (1U << (8+1+2*info->line));
  3533. +}
  3534. +
  3535. +static inline void
  3536. +e100_enable_serial_tx_ready_irq(struct e100_serial *info)
  3537. +{
  3538. +#ifdef SERIAL_DEBUG_INTR
  3539. + printk("ser_tx_irq(%d): 1\n",info->line);
  3540. + printk("**** %d = %d\n",
  3541. + (8+1+2*info->line),
  3542. + (1U << (8+1+2*info->line)));
  3543. +#endif
  3544. + DINTR2(DEBUG_LOG(info->line,"IRQ enable ready_irq %i\n", info->line));
  3545. + *R_IRQ_MASK1_SET = (1U << (8+1+2*info->line));
  3546. +}
  3547. +
  3548. +static inline void e100_enable_rx_irq(struct e100_serial *info)
  3549. +{
  3550. + if (info->uses_dma_in)
  3551. + e100_enable_rxdma_irq(info);
  3552. + else
  3553. + e100_enable_serial_data_irq(info);
  3554. +}
  3555. +static inline void e100_disable_rx_irq(struct e100_serial *info)
  3556. +{
  3557. + if (info->uses_dma_in)
  3558. + e100_disable_rxdma_irq(info);
  3559. + else
  3560. + e100_disable_serial_data_irq(info);
  3561. +}
  3562. +
  3563. +#if defined(CONFIG_ETRAX_RS485)
  3564. +/* Enable RS-485 mode on selected port. This is UGLY. */
  3565. +static int
  3566. +e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
  3567. +{
  3568. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  3569. +
  3570. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  3571. + *R_PORT_PA_DATA = port_pa_data_shadow |= (1 << rs485_pa_bit);
  3572. +#endif
  3573. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  3574. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  3575. + rs485_port_g_bit, 1);
  3576. +#endif
  3577. +#if defined(CONFIG_ETRAX_RS485_LTC1387)
  3578. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  3579. + CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 1);
  3580. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  3581. + CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
  3582. +#endif
  3583. +
  3584. + info->rs485.flags = r->flags;
  3585. + if (r->delay_rts_before_send >= 1000)
  3586. + info->rs485.delay_rts_before_send = 1000;
  3587. + else
  3588. + info->rs485.delay_rts_before_send = r->delay_rts_before_send;
  3589. +/* printk("rts: on send = %i, after = %i, enabled = %i",
  3590. + info->rs485.rts_on_send,
  3591. + info->rs485.rts_after_sent,
  3592. + info->rs485.enabled
  3593. + );
  3594. +*/
  3595. + return 0;
  3596. +}
  3597. +
  3598. +static int
  3599. +e100_write_rs485(struct tty_struct *tty,
  3600. + const unsigned char *buf, int count)
  3601. +{
  3602. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  3603. + int old_value = (info->rs485.flags) & SER_RS485_ENABLED;
  3604. +
  3605. + /* rs485 is always implicitly enabled if we're using the ioctl()
  3606. + * but it doesn't have to be set in the serial_rs485
  3607. + * (to be backward compatible with old apps)
  3608. + * So we store, set and restore it.
  3609. + */
  3610. + info->rs485.flags |= SER_RS485_ENABLED;
  3611. + /* rs_write now deals with RS485 if enabled */
  3612. + count = rs_write(tty, buf, count);
  3613. + if (!old_value)
  3614. + info->rs485.flags &= ~(SER_RS485_ENABLED);
  3615. + return count;
  3616. +}
  3617. +
  3618. +#ifdef CONFIG_ETRAX_FAST_TIMER
  3619. +/* Timer function to toggle RTS when using FAST_TIMER */
  3620. +static void rs485_toggle_rts_timer_function(unsigned long data)
  3621. +{
  3622. + struct e100_serial *info = (struct e100_serial *)data;
  3623. +
  3624. + fast_timers_rs485[info->line].function = NULL;
  3625. + e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
  3626. +#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
  3627. + e100_enable_rx(info);
  3628. + e100_enable_rx_irq(info);
  3629. +#endif
  3630. +}
  3631. +#endif
  3632. +#endif /* CONFIG_ETRAX_RS485 */
  3633. +
  3634. +/*
  3635. + * ------------------------------------------------------------
  3636. + * rs_stop() and rs_start()
  3637. + *
  3638. + * This routines are called before setting or resetting tty->stopped.
  3639. + * They enable or disable transmitter using the XOFF registers, as necessary.
  3640. + * ------------------------------------------------------------
  3641. + */
  3642. +
  3643. +static void
  3644. +rs_stop(struct tty_struct *tty)
  3645. +{
  3646. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  3647. + if (info) {
  3648. + unsigned long flags;
  3649. + unsigned long xoff;
  3650. +
  3651. + local_irq_save(flags);
  3652. + DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
  3653. + CIRC_CNT(info->xmit.head,
  3654. + info->xmit.tail,SERIAL_XMIT_SIZE)));
  3655. +
  3656. + xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
  3657. + STOP_CHAR(info->port.tty));
  3658. + xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
  3659. + if (tty->termios->c_iflag & IXON ) {
  3660. + xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
  3661. + }
  3662. +
  3663. + *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
  3664. + local_irq_restore(flags);
  3665. + }
  3666. +}
  3667. +
  3668. +static void
  3669. +rs_start(struct tty_struct *tty)
  3670. +{
  3671. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  3672. + if (info) {
  3673. + unsigned long flags;
  3674. + unsigned long xoff;
  3675. +
  3676. + local_irq_save(flags);
  3677. + DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
  3678. + CIRC_CNT(info->xmit.head,
  3679. + info->xmit.tail,SERIAL_XMIT_SIZE)));
  3680. + xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
  3681. + xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
  3682. + if (tty->termios->c_iflag & IXON ) {
  3683. + xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
  3684. + }
  3685. +
  3686. + *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
  3687. + if (!info->uses_dma_out &&
  3688. + info->xmit.head != info->xmit.tail && info->xmit.buf)
  3689. + e100_enable_serial_tx_ready_irq(info);
  3690. +
  3691. + local_irq_restore(flags);
  3692. + }
  3693. +}
  3694. +
  3695. +/*
  3696. + * ----------------------------------------------------------------------
  3697. + *
  3698. + * Here starts the interrupt handling routines. All of the following
  3699. + * subroutines are declared as inline and are folded into
  3700. + * rs_interrupt(). They were separated out for readability's sake.
  3701. + *
  3702. + * Note: rs_interrupt() is a "fast" interrupt, which means that it
  3703. + * runs with interrupts turned off. People who may want to modify
  3704. + * rs_interrupt() should try to keep the interrupt handler as fast as
  3705. + * possible. After you are done making modifications, it is not a bad
  3706. + * idea to do:
  3707. + *
  3708. + * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
  3709. + *
  3710. + * and look at the resulting assemble code in serial.s.
  3711. + *
  3712. + * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
  3713. + * -----------------------------------------------------------------------
  3714. + */
  3715. +
  3716. +/*
  3717. + * This routine is used by the interrupt handler to schedule
  3718. + * processing in the software interrupt portion of the driver.
  3719. + */
  3720. +static void rs_sched_event(struct e100_serial *info, int event)
  3721. +{
  3722. + if (info->event & (1 << event))
  3723. + return;
  3724. + info->event |= 1 << event;
  3725. + schedule_work(&info->work);
  3726. +}
  3727. +
  3728. +/* The output DMA channel is free - use it to send as many chars as possible
  3729. + * NOTES:
  3730. + * We don't pay attention to info->x_char, which means if the TTY wants to
  3731. + * use XON/XOFF it will set info->x_char but we won't send any X char!
  3732. + *
  3733. + * To implement this, we'd just start a DMA send of 1 byte pointing at a
  3734. + * buffer containing the X char, and skip updating xmit. We'd also have to
  3735. + * check if the last sent char was the X char when we enter this function
  3736. + * the next time, to avoid updating xmit with the sent X value.
  3737. + */
  3738. +
  3739. +static void
  3740. +transmit_chars_dma(struct e100_serial *info)
  3741. +{
  3742. + unsigned int c, sentl;
  3743. + struct etrax_dma_descr *descr;
  3744. +
  3745. +#ifdef CONFIG_SVINTO_SIM
  3746. + /* This will output too little if tail is not 0 always since
  3747. + * we don't reloop to send the other part. Anyway this SHOULD be a
  3748. + * no-op - transmit_chars_dma would never really be called during sim
  3749. + * since rs_write does not write into the xmit buffer then.
  3750. + */
  3751. + if (info->xmit.tail)
  3752. + printk("Error in serial.c:transmit_chars-dma(), tail!=0\n");
  3753. + if (info->xmit.head != info->xmit.tail) {
  3754. + SIMCOUT(info->xmit.buf + info->xmit.tail,
  3755. + CIRC_CNT(info->xmit.head,
  3756. + info->xmit.tail,
  3757. + SERIAL_XMIT_SIZE));
  3758. + info->xmit.head = info->xmit.tail; /* move back head */
  3759. + info->tr_running = 0;
  3760. + }
  3761. + return;
  3762. +#endif
  3763. + /* acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
  3764. + *info->oclrintradr =
  3765. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  3766. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  3767. +
  3768. +#ifdef SERIAL_DEBUG_INTR
  3769. + if (info->line == SERIAL_DEBUG_LINE)
  3770. + printk("tc\n");
  3771. +#endif
  3772. + if (!info->tr_running) {
  3773. + /* weirdo... we shouldn't get here! */
  3774. + printk(KERN_WARNING "Achtung: transmit_chars_dma with !tr_running\n");
  3775. + return;
  3776. + }
  3777. +
  3778. + descr = &info->tr_descr;
  3779. +
  3780. + /* first get the amount of bytes sent during the last DMA transfer,
  3781. + and update xmit accordingly */
  3782. +
  3783. + /* if the stop bit was not set, all data has been sent */
  3784. + if (!(descr->status & d_stop)) {
  3785. + sentl = descr->sw_len;
  3786. + } else
  3787. + /* otherwise we find the amount of data sent here */
  3788. + sentl = descr->hw_len;
  3789. +
  3790. + DFLOW(DEBUG_LOG(info->line, "TX %i done\n", sentl));
  3791. +
  3792. + /* update stats */
  3793. + info->icount.tx += sentl;
  3794. +
  3795. + /* update xmit buffer */
  3796. + info->xmit.tail = (info->xmit.tail + sentl) & (SERIAL_XMIT_SIZE - 1);
  3797. +
  3798. + /* if there is only a few chars left in the buf, wake up the blocked
  3799. + write if any */
  3800. + if (CIRC_CNT(info->xmit.head,
  3801. + info->xmit.tail,
  3802. + SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
  3803. + rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
  3804. +
  3805. + /* find out the largest amount of consecutive bytes we want to send now */
  3806. +
  3807. + c = CIRC_CNT_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  3808. +
  3809. + /* Don't send all in one DMA transfer - divide it so we wake up
  3810. + * application before all is sent
  3811. + */
  3812. +
  3813. + if (c >= 4*WAKEUP_CHARS)
  3814. + c = c/2;
  3815. +
  3816. + if (c <= 0) {
  3817. + /* our job here is done, don't schedule any new DMA transfer */
  3818. + info->tr_running = 0;
  3819. +
  3820. +#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
  3821. + if (info->rs485.flags & SER_RS485_ENABLED) {
  3822. + /* Set a short timer to toggle RTS */
  3823. + start_one_shot_timer(&fast_timers_rs485[info->line],
  3824. + rs485_toggle_rts_timer_function,
  3825. + (unsigned long)info,
  3826. + info->char_time_usec*2,
  3827. + "RS-485");
  3828. + }
  3829. +#endif /* RS485 */
  3830. + return;
  3831. + }
  3832. +
  3833. + /* ok we can schedule a dma send of c chars starting at info->xmit.tail */
  3834. + /* set up the descriptor correctly for output */
  3835. + DFLOW(DEBUG_LOG(info->line, "TX %i\n", c));
  3836. + descr->ctrl = d_int | d_eol | d_wait; /* Wait needed for tty_wait_until_sent() */
  3837. + descr->sw_len = c;
  3838. + descr->buf = virt_to_phys(info->xmit.buf + info->xmit.tail);
  3839. + descr->status = 0;
  3840. +
  3841. + *info->ofirstadr = virt_to_phys(descr); /* write to R_DMAx_FIRST */
  3842. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
  3843. +
  3844. + /* DMA is now running (hopefully) */
  3845. +} /* transmit_chars_dma */
  3846. +
  3847. +static void
  3848. +start_transmit(struct e100_serial *info)
  3849. +{
  3850. +#if 0
  3851. + if (info->line == SERIAL_DEBUG_LINE)
  3852. + printk("x\n");
  3853. +#endif
  3854. +
  3855. + info->tr_descr.sw_len = 0;
  3856. + info->tr_descr.hw_len = 0;
  3857. + info->tr_descr.status = 0;
  3858. + info->tr_running = 1;
  3859. + if (info->uses_dma_out)
  3860. + transmit_chars_dma(info);
  3861. + else
  3862. + e100_enable_serial_tx_ready_irq(info);
  3863. +} /* start_transmit */
  3864. +
  3865. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  3866. +static int serial_fast_timer_started = 0;
  3867. +static int serial_fast_timer_expired = 0;
  3868. +static void flush_timeout_function(unsigned long data);
  3869. +#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
  3870. + unsigned long timer_flags; \
  3871. + local_irq_save(timer_flags); \
  3872. + if (fast_timers[info->line].function == NULL) { \
  3873. + serial_fast_timer_started++; \
  3874. + TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
  3875. + TIMERD(DEBUG_LOG(info->line, "num started: %i\n", serial_fast_timer_started)); \
  3876. + start_one_shot_timer(&fast_timers[info->line], \
  3877. + flush_timeout_function, \
  3878. + (unsigned long)info, \
  3879. + (usec), \
  3880. + string); \
  3881. + } \
  3882. + else { \
  3883. + TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
  3884. + } \
  3885. + local_irq_restore(timer_flags); \
  3886. +}
  3887. +#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
  3888. +
  3889. +#else
  3890. +#define START_FLUSH_FAST_TIMER_TIME(info, string, usec)
  3891. +#define START_FLUSH_FAST_TIMER(info, string)
  3892. +#endif
  3893. +
  3894. +static struct etrax_recv_buffer *
  3895. +alloc_recv_buffer(unsigned int size)
  3896. +{
  3897. + struct etrax_recv_buffer *buffer;
  3898. +
  3899. + if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
  3900. + return NULL;
  3901. +
  3902. + buffer->next = NULL;
  3903. + buffer->length = 0;
  3904. + buffer->error = TTY_NORMAL;
  3905. +
  3906. + return buffer;
  3907. +}
  3908. +
  3909. +static void
  3910. +append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
  3911. +{
  3912. + unsigned long flags;
  3913. +
  3914. + local_irq_save(flags);
  3915. +
  3916. + if (!info->first_recv_buffer)
  3917. + info->first_recv_buffer = buffer;
  3918. + else
  3919. + info->last_recv_buffer->next = buffer;
  3920. +
  3921. + info->last_recv_buffer = buffer;
  3922. +
  3923. + info->recv_cnt += buffer->length;
  3924. + if (info->recv_cnt > info->max_recv_cnt)
  3925. + info->max_recv_cnt = info->recv_cnt;
  3926. +
  3927. + local_irq_restore(flags);
  3928. +}
  3929. +
  3930. +static int
  3931. +add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char flag)
  3932. +{
  3933. + struct etrax_recv_buffer *buffer;
  3934. + if (info->uses_dma_in) {
  3935. + if (!(buffer = alloc_recv_buffer(4)))
  3936. + return 0;
  3937. +
  3938. + buffer->length = 1;
  3939. + buffer->error = flag;
  3940. + buffer->buffer[0] = data;
  3941. +
  3942. + append_recv_buffer(info, buffer);
  3943. +
  3944. + info->icount.rx++;
  3945. + } else {
  3946. + struct tty_struct *tty = info->port.tty;
  3947. + tty_insert_flip_char(tty, data, flag);
  3948. + info->icount.rx++;
  3949. + }
  3950. +
  3951. + return 1;
  3952. +}
  3953. +
  3954. +static unsigned int handle_descr_data(struct e100_serial *info,
  3955. + struct etrax_dma_descr *descr,
  3956. + unsigned int recvl)
  3957. +{
  3958. + struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;
  3959. +
  3960. + if (info->recv_cnt + recvl > 65536) {
  3961. + printk(KERN_CRIT
  3962. + "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
  3963. + return 0;
  3964. + }
  3965. +
  3966. + buffer->length = recvl;
  3967. +
  3968. + if (info->errorcode == ERRCODE_SET_BREAK)
  3969. + buffer->error = TTY_BREAK;
  3970. + info->errorcode = 0;
  3971. +
  3972. + append_recv_buffer(info, buffer);
  3973. +
  3974. + if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
  3975. + panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
  3976. +
  3977. + descr->buf = virt_to_phys(buffer->buffer);
  3978. +
  3979. + return recvl;
  3980. +}
  3981. +
  3982. +static unsigned int handle_all_descr_data(struct e100_serial *info)
  3983. +{
  3984. + struct etrax_dma_descr *descr;
  3985. + unsigned int recvl;
  3986. + unsigned int ret = 0;
  3987. +
  3988. + while (1)
  3989. + {
  3990. + descr = &info->rec_descr[info->cur_rec_descr];
  3991. +
  3992. + if (descr == phys_to_virt(*info->idescradr))
  3993. + break;
  3994. +
  3995. + if (++info->cur_rec_descr == SERIAL_RECV_DESCRIPTORS)
  3996. + info->cur_rec_descr = 0;
  3997. +
  3998. + /* find out how many bytes were read */
  3999. +
  4000. + /* if the eop bit was not set, all data has been received */
  4001. + if (!(descr->status & d_eop)) {
  4002. + recvl = descr->sw_len;
  4003. + } else {
  4004. + /* otherwise we find the amount of data received here */
  4005. + recvl = descr->hw_len;
  4006. + }
  4007. +
  4008. + /* Reset the status information */
  4009. + descr->status = 0;
  4010. +
  4011. + DFLOW( DEBUG_LOG(info->line, "RX %lu\n", recvl);
  4012. + if (info->port.tty->stopped) {
  4013. + unsigned char *buf = phys_to_virt(descr->buf);
  4014. + DEBUG_LOG(info->line, "rx 0x%02X\n", buf[0]);
  4015. + DEBUG_LOG(info->line, "rx 0x%02X\n", buf[1]);
  4016. + DEBUG_LOG(info->line, "rx 0x%02X\n", buf[2]);
  4017. + }
  4018. + );
  4019. +
  4020. + /* update stats */
  4021. + info->icount.rx += recvl;
  4022. +
  4023. + ret += handle_descr_data(info, descr, recvl);
  4024. + }
  4025. +
  4026. + return ret;
  4027. +}
  4028. +
  4029. +static void receive_chars_dma(struct e100_serial *info)
  4030. +{
  4031. + struct tty_struct *tty;
  4032. + unsigned char rstat;
  4033. +
  4034. +#ifdef CONFIG_SVINTO_SIM
  4035. + /* No receive in the simulator. Will probably be when the rest of
  4036. + * the serial interface works, and this piece will just be removed.
  4037. + */
  4038. + return;
  4039. +#endif
  4040. +
  4041. + /* Acknowledge both dma_descr and dma_eop irq in R_DMA_CHx_CLR_INTR */
  4042. + *info->iclrintradr =
  4043. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  4044. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  4045. +
  4046. + tty = info->port.tty;
  4047. + if (!tty) /* Something wrong... */
  4048. + return;
  4049. +
  4050. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  4051. + if (info->uses_dma_in)
  4052. + e100_enable_serial_data_irq(info);
  4053. +#endif
  4054. +
  4055. + if (info->errorcode == ERRCODE_INSERT_BREAK)
  4056. + add_char_and_flag(info, '\0', TTY_BREAK);
  4057. +
  4058. + handle_all_descr_data(info);
  4059. +
  4060. + /* Read the status register to detect errors */
  4061. + rstat = info->ioport[REG_STATUS];
  4062. + if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
  4063. + DFLOW(DEBUG_LOG(info->line, "XOFF detect stat %x\n", rstat));
  4064. + }
  4065. +
  4066. + if (rstat & SER_ERROR_MASK) {
  4067. + /* If we got an error, we must reset it by reading the
  4068. + * data_in field
  4069. + */
  4070. + unsigned char data = info->ioport[REG_DATA];
  4071. +
  4072. + PROCSTAT(ser_stat[info->line].errors_cnt++);
  4073. + DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
  4074. + ((rstat & SER_ERROR_MASK) << 8) | data);
  4075. +
  4076. + if (rstat & SER_PAR_ERR_MASK)
  4077. + add_char_and_flag(info, data, TTY_PARITY);
  4078. + else if (rstat & SER_OVERRUN_MASK)
  4079. + add_char_and_flag(info, data, TTY_OVERRUN);
  4080. + else if (rstat & SER_FRAMING_ERR_MASK)
  4081. + add_char_and_flag(info, data, TTY_FRAME);
  4082. + }
  4083. +
  4084. + START_FLUSH_FAST_TIMER(info, "receive_chars");
  4085. +
  4086. + /* Restart the receiving DMA */
  4087. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
  4088. +}
  4089. +
  4090. +static int start_recv_dma(struct e100_serial *info)
  4091. +{
  4092. + struct etrax_dma_descr *descr = info->rec_descr;
  4093. + struct etrax_recv_buffer *buffer;
  4094. + int i;
  4095. +
  4096. + /* Set up the receiving descriptors */
  4097. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
  4098. + if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
  4099. + panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
  4100. +
  4101. + descr[i].ctrl = d_int;
  4102. + descr[i].buf = virt_to_phys(buffer->buffer);
  4103. + descr[i].sw_len = SERIAL_DESCR_BUF_SIZE;
  4104. + descr[i].hw_len = 0;
  4105. + descr[i].status = 0;
  4106. + descr[i].next = virt_to_phys(&descr[i+1]);
  4107. + }
  4108. +
  4109. + /* Link the last descriptor to the first */
  4110. + descr[i-1].next = virt_to_phys(&descr[0]);
  4111. +
  4112. + /* Start with the first descriptor in the list */
  4113. + info->cur_rec_descr = 0;
  4114. +
  4115. + /* Start the DMA */
  4116. + *info->ifirstadr = virt_to_phys(&descr[info->cur_rec_descr]);
  4117. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, start);
  4118. +
  4119. + /* Input DMA should be running now */
  4120. + return 1;
  4121. +}
  4122. +
  4123. +static void
  4124. +start_receive(struct e100_serial *info)
  4125. +{
  4126. +#ifdef CONFIG_SVINTO_SIM
  4127. + /* No receive in the simulator. Will probably be when the rest of
  4128. + * the serial interface works, and this piece will just be removed.
  4129. + */
  4130. + return;
  4131. +#endif
  4132. + if (info->uses_dma_in) {
  4133. + /* reset the input dma channel to be sure it works */
  4134. +
  4135. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  4136. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
  4137. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  4138. +
  4139. + start_recv_dma(info);
  4140. + }
  4141. +}
  4142. +
  4143. +
  4144. +/* the bits in the MASK2 register are laid out like this:
  4145. + DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR
  4146. + where I is the input channel and O is the output channel for the port.
  4147. + info->irq is the bit number for the DMAO_DESCR so to check the others we
  4148. + shift info->irq to the left.
  4149. +*/
  4150. +
  4151. +/* dma output channel interrupt handler
  4152. + this interrupt is called from DMA2(ser2), DMA4(ser3), DMA6(ser0) or
  4153. + DMA8(ser1) when they have finished a descriptor with the intr flag set.
  4154. +*/
  4155. +
  4156. +static irqreturn_t
  4157. +tr_interrupt(int irq, void *dev_id)
  4158. +{
  4159. + struct e100_serial *info;
  4160. + unsigned long ireg;
  4161. + int i;
  4162. + int handled = 0;
  4163. +
  4164. +#ifdef CONFIG_SVINTO_SIM
  4165. + /* No receive in the simulator. Will probably be when the rest of
  4166. + * the serial interface works, and this piece will just be removed.
  4167. + */
  4168. + {
  4169. + const char *s = "What? tr_interrupt in simulator??\n";
  4170. + SIMCOUT(s,strlen(s));
  4171. + }
  4172. + return IRQ_HANDLED;
  4173. +#endif
  4174. +
  4175. + /* find out the line that caused this irq and get it from rs_table */
  4176. +
  4177. + ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
  4178. +
  4179. + for (i = 0; i < NR_PORTS; i++) {
  4180. + info = rs_table + i;
  4181. + if (!info->enabled || !info->uses_dma_out)
  4182. + continue;
  4183. + /* check for dma_descr (don't need to check for dma_eop in output dma for serial) */
  4184. + if (ireg & info->irq) {
  4185. + handled = 1;
  4186. + /* we can send a new dma bunch. make it so. */
  4187. + DINTR2(DEBUG_LOG(info->line, "tr_interrupt %i\n", i));
  4188. + /* Read jiffies_usec first,
  4189. + * we want this time to be as late as possible
  4190. + */
  4191. + PROCSTAT(ser_stat[info->line].tx_dma_ints++);
  4192. + info->last_tx_active_usec = GET_JIFFIES_USEC();
  4193. + info->last_tx_active = jiffies;
  4194. + transmit_chars_dma(info);
  4195. + }
  4196. +
  4197. + /* FIXME: here we should really check for a change in the
  4198. + status lines and if so call status_handle(info) */
  4199. + }
  4200. + return IRQ_RETVAL(handled);
  4201. +} /* tr_interrupt */
  4202. +
  4203. +/* dma input channel interrupt handler */
  4204. +
  4205. +static irqreturn_t
  4206. +rec_interrupt(int irq, void *dev_id)
  4207. +{
  4208. + struct e100_serial *info;
  4209. + unsigned long ireg;
  4210. + int i;
  4211. + int handled = 0;
  4212. +
  4213. +#ifdef CONFIG_SVINTO_SIM
  4214. + /* No receive in the simulator. Will probably be when the rest of
  4215. + * the serial interface works, and this piece will just be removed.
  4216. + */
  4217. + {
  4218. + const char *s = "What? rec_interrupt in simulator??\n";
  4219. + SIMCOUT(s,strlen(s));
  4220. + }
  4221. + return IRQ_HANDLED;
  4222. +#endif
  4223. +
  4224. + /* find out the line that caused this irq and get it from rs_table */
  4225. +
  4226. + ireg = *R_IRQ_MASK2_RD; /* get the active irq bits for the dma channels */
  4227. +
  4228. + for (i = 0; i < NR_PORTS; i++) {
  4229. + info = rs_table + i;
  4230. + if (!info->enabled || !info->uses_dma_in)
  4231. + continue;
  4232. + /* check for both dma_eop and dma_descr for the input dma channel */
  4233. + if (ireg & ((info->irq << 2) | (info->irq << 3))) {
  4234. + handled = 1;
  4235. + /* we have received something */
  4236. + receive_chars_dma(info);
  4237. + }
  4238. +
  4239. + /* FIXME: here we should really check for a change in the
  4240. + status lines and if so call status_handle(info) */
  4241. + }
  4242. + return IRQ_RETVAL(handled);
  4243. +} /* rec_interrupt */
  4244. +
  4245. +static int force_eop_if_needed(struct e100_serial *info)
  4246. +{
  4247. + /* We check data_avail bit to determine if data has
  4248. + * arrived since last time
  4249. + */
  4250. + unsigned char rstat = info->ioport[REG_STATUS];
  4251. +
  4252. + /* error or datavail? */
  4253. + if (rstat & SER_ERROR_MASK) {
  4254. + /* Some error has occurred. If there has been valid data, an
  4255. + * EOP interrupt will be made automatically. If no data, the
  4256. + * normal ser_interrupt should be enabled and handle it.
  4257. + * So do nothing!
  4258. + */
  4259. + DEBUG_LOG(info->line, "timeout err: rstat 0x%03X\n",
  4260. + rstat | (info->line << 8));
  4261. + return 0;
  4262. + }
  4263. +
  4264. + if (rstat & SER_DATA_AVAIL_MASK) {
  4265. + /* Ok data, no error, count it */
  4266. + TIMERD(DEBUG_LOG(info->line, "timeout: rstat 0x%03X\n",
  4267. + rstat | (info->line << 8)));
  4268. + /* Read data to clear status flags */
  4269. + (void)info->ioport[REG_DATA];
  4270. +
  4271. + info->forced_eop = 0;
  4272. + START_FLUSH_FAST_TIMER(info, "magic");
  4273. + return 0;
  4274. + }
  4275. +
  4276. + /* hit the timeout, force an EOP for the input
  4277. + * dma channel if we haven't already
  4278. + */
  4279. + if (!info->forced_eop) {
  4280. + info->forced_eop = 1;
  4281. + PROCSTAT(ser_stat[info->line].timeout_flush_cnt++);
  4282. + TIMERD(DEBUG_LOG(info->line, "timeout EOP %i\n", info->line));
  4283. + FORCE_EOP(info);
  4284. + }
  4285. +
  4286. + return 1;
  4287. +}
  4288. +
  4289. +static void flush_to_flip_buffer(struct e100_serial *info)
  4290. +{
  4291. + struct tty_struct *tty;
  4292. + struct etrax_recv_buffer *buffer;
  4293. + unsigned long flags;
  4294. +
  4295. + local_irq_save(flags);
  4296. + tty = info->port.tty;
  4297. +
  4298. + if (!tty) {
  4299. + local_irq_restore(flags);
  4300. + return;
  4301. + }
  4302. +
  4303. + while ((buffer = info->first_recv_buffer) != NULL) {
  4304. + unsigned int count = buffer->length;
  4305. +
  4306. + tty_insert_flip_string(tty, buffer->buffer, count);
  4307. + info->recv_cnt -= count;
  4308. +
  4309. + if (count == buffer->length) {
  4310. + info->first_recv_buffer = buffer->next;
  4311. + kfree(buffer);
  4312. + } else {
  4313. + buffer->length -= count;
  4314. + memmove(buffer->buffer, buffer->buffer + count, buffer->length);
  4315. + buffer->error = TTY_NORMAL;
  4316. + }
  4317. + }
  4318. +
  4319. + if (!info->first_recv_buffer)
  4320. + info->last_recv_buffer = NULL;
  4321. +
  4322. + local_irq_restore(flags);
  4323. +
  4324. + /* This includes a check for low-latency */
  4325. + tty_flip_buffer_push(tty);
  4326. +}
  4327. +
  4328. +static void check_flush_timeout(struct e100_serial *info)
  4329. +{
  4330. + /* Flip what we've got (if we can) */
  4331. + flush_to_flip_buffer(info);
  4332. +
  4333. + /* We might need to flip later, but not too fast
  4334. + * since the system is busy processing input... */
  4335. + if (info->first_recv_buffer)
  4336. + START_FLUSH_FAST_TIMER_TIME(info, "flip", 2000);
  4337. +
  4338. + /* Force eop last, since data might have come while we're processing
  4339. + * and if we started the slow timer above, we won't start a fast
  4340. + * below.
  4341. + */
  4342. + force_eop_if_needed(info);
  4343. +}
  4344. +
  4345. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  4346. +static void flush_timeout_function(unsigned long data)
  4347. +{
  4348. + struct e100_serial *info = (struct e100_serial *)data;
  4349. +
  4350. + fast_timers[info->line].function = NULL;
  4351. + serial_fast_timer_expired++;
  4352. + TIMERD(DEBUG_LOG(info->line, "flush_timout %i ", info->line));
  4353. + TIMERD(DEBUG_LOG(info->line, "num expired: %i\n", serial_fast_timer_expired));
  4354. + check_flush_timeout(info);
  4355. +}
  4356. +
  4357. +#else
  4358. +
  4359. +/* dma fifo/buffer timeout handler
  4360. + forces an end-of-packet for the dma input channel if no chars
  4361. + have been received for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS/100 s.
  4362. +*/
  4363. +
  4364. +static struct timer_list flush_timer;
  4365. +
  4366. +static void
  4367. +timed_flush_handler(unsigned long ptr)
  4368. +{
  4369. + struct e100_serial *info;
  4370. + int i;
  4371. +
  4372. +#ifdef CONFIG_SVINTO_SIM
  4373. + return;
  4374. +#endif
  4375. +
  4376. + for (i = 0; i < NR_PORTS; i++) {
  4377. + info = rs_table + i;
  4378. + if (info->uses_dma_in)
  4379. + check_flush_timeout(info);
  4380. + }
  4381. +
  4382. + /* restart flush timer */
  4383. + mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
  4384. +}
  4385. +#endif
  4386. +
  4387. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  4388. +
  4389. +/* If there is an error (ie break) when the DMA is running and
  4390. + * there are no bytes in the fifo the DMA is stopped and we get no
  4391. + * eop interrupt. Thus we have to monitor the first bytes on a DMA
  4392. + * transfer, and if it is without error we can turn the serial
  4393. + * interrupts off.
  4394. + */
  4395. +
  4396. +/*
  4397. +BREAK handling on ETRAX 100:
  4398. +ETRAX will generate interrupt although there is no stop bit between the
  4399. +characters.
  4400. +
  4401. +Depending on how long the break sequence is, the end of the break sequence
  4402. +will look different:
  4403. +| indicates start/end of a character.
  4404. +
  4405. +B= Break character (0x00) with framing error.
  4406. +E= Error byte with parity error received after B characters.
  4407. +F= "Faked" valid byte received immediately after B characters.
  4408. +V= Valid byte
  4409. +
  4410. +1.
  4411. + B BL ___________________________ V
  4412. +.._|__________|__________| |valid data |
  4413. +
  4414. +Multiple frame errors with data == 0x00 (B),
  4415. +the timing matches up "perfectly" so no extra ending char is detected.
  4416. +The RXD pin is 1 in the last interrupt, in that case
  4417. +we set info->errorcode = ERRCODE_INSERT_BREAK, but we can't really
  4418. +know if another byte will come and this really is case 2. below
  4419. +(e.g F=0xFF or 0xFE)
  4420. +If RXD pin is 0 we can expect another character (see 2. below).
  4421. +
  4422. +
  4423. +2.
  4424. +
  4425. + B B E or F__________________..__ V
  4426. +.._|__________|__________|______ | |valid data
  4427. + "valid" or
  4428. + parity error
  4429. +
  4430. +Multiple frame errors with data == 0x00 (B),
  4431. +but the part of the break trigs is interpreted as a start bit (and possibly
  4432. +some 0 bits followed by a number of 1 bits and a stop bit).
  4433. +Depending on parity settings etc. this last character can be either
  4434. +a fake "valid" char (F) or have a parity error (E).
  4435. +
  4436. +If the character is valid it will be put in the buffer,
  4437. +we set info->errorcode = ERRCODE_SET_BREAK so the receive interrupt
  4438. +will set the flags so the tty will handle it,
  4439. +if it's an error byte it will not be put in the buffer
  4440. +and we set info->errorcode = ERRCODE_INSERT_BREAK.
  4441. +
  4442. +To distinguish a V byte in 1. from an F byte in 2. we keep a timestamp
  4443. +of the last faulty char (B) and compares it with the current time:
  4444. +If the elapsed time is less than 2*char_time_usec we will assume
  4445. +it's a faked F char and not a Valid char and set
  4446. +info->errorcode = ERRCODE_SET_BREAK.
  4447. +
  4448. +Flaws in the above solution:
  4449. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  4450. +We use the timer to distinguish a F character from a V character,
  4451. +if a V character is too close after the break we might make the wrong decision.
  4452. +
  4453. +TODO: The break will be delayed until an F or V character is received.
  4454. +
  4455. +*/
  4456. +
  4457. +static
  4458. +struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
  4459. +{
  4460. + unsigned long data_read;
  4461. + struct tty_struct *tty = info->port.tty;
  4462. +
  4463. + if (!tty) {
  4464. + printk("!NO TTY!\n");
  4465. + return info;
  4466. + }
  4467. +
  4468. + /* Read data and status at the same time */
  4469. + data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
  4470. +more_data:
  4471. + if (data_read & IO_MASK(R_SERIAL0_READ, xoff_detect) ) {
  4472. + DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
  4473. + }
  4474. + DINTR2(DEBUG_LOG(info->line, "ser_rx %c\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read)));
  4475. +
  4476. + if (data_read & ( IO_MASK(R_SERIAL0_READ, framing_err) |
  4477. + IO_MASK(R_SERIAL0_READ, par_err) |
  4478. + IO_MASK(R_SERIAL0_READ, overrun) )) {
  4479. + /* An error */
  4480. + info->last_rx_active_usec = GET_JIFFIES_USEC();
  4481. + info->last_rx_active = jiffies;
  4482. + DINTR1(DEBUG_LOG(info->line, "ser_rx err stat_data %04X\n", data_read));
  4483. + DLOG_INT_TRIG(
  4484. + if (!log_int_trig1_pos) {
  4485. + log_int_trig1_pos = log_int_pos;
  4486. + log_int(rdpc(), 0, 0);
  4487. + }
  4488. + );
  4489. +
  4490. +
  4491. + if ( ((data_read & IO_MASK(R_SERIAL0_READ, data_in)) == 0) &&
  4492. + (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) ) {
  4493. + /* Most likely a break, but we get interrupts over and
  4494. + * over again.
  4495. + */
  4496. +
  4497. + if (!info->break_detected_cnt) {
  4498. + DEBUG_LOG(info->line, "#BRK start\n", 0);
  4499. + }
  4500. + if (data_read & IO_MASK(R_SERIAL0_READ, rxd)) {
  4501. + /* The RX pin is high now, so the break
  4502. + * must be over, but....
  4503. + * we can't really know if we will get another
  4504. + * last byte ending the break or not.
  4505. + * And we don't know if the byte (if any) will
  4506. + * have an error or look valid.
  4507. + */
  4508. + DEBUG_LOG(info->line, "# BL BRK\n", 0);
  4509. + info->errorcode = ERRCODE_INSERT_BREAK;
  4510. + }
  4511. + info->break_detected_cnt++;
  4512. + } else {
  4513. + /* The error does not look like a break, but could be
  4514. + * the end of one
  4515. + */
  4516. + if (info->break_detected_cnt) {
  4517. + DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
  4518. + info->errorcode = ERRCODE_INSERT_BREAK;
  4519. + } else {
  4520. + unsigned char data = IO_EXTRACT(R_SERIAL0_READ,
  4521. + data_in, data_read);
  4522. + char flag = TTY_NORMAL;
  4523. + if (info->errorcode == ERRCODE_INSERT_BREAK) {
  4524. + struct tty_struct *tty = info->port.tty;
  4525. + tty_insert_flip_char(tty, 0, flag);
  4526. + info->icount.rx++;
  4527. + }
  4528. +
  4529. + if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
  4530. + info->icount.parity++;
  4531. + flag = TTY_PARITY;
  4532. + } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
  4533. + info->icount.overrun++;
  4534. + flag = TTY_OVERRUN;
  4535. + } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
  4536. + info->icount.frame++;
  4537. + flag = TTY_FRAME;
  4538. + }
  4539. + tty_insert_flip_char(tty, data, flag);
  4540. + info->errorcode = 0;
  4541. + }
  4542. + info->break_detected_cnt = 0;
  4543. + }
  4544. + } else if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
  4545. + /* No error */
  4546. + DLOG_INT_TRIG(
  4547. + if (!log_int_trig1_pos) {
  4548. + if (log_int_pos >= log_int_size) {
  4549. + log_int_pos = 0;
  4550. + }
  4551. + log_int_trig0_pos = log_int_pos;
  4552. + log_int(rdpc(), 0, 0);
  4553. + }
  4554. + );
  4555. + tty_insert_flip_char(tty,
  4556. + IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
  4557. + TTY_NORMAL);
  4558. + } else {
  4559. + DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
  4560. + }
  4561. +
  4562. +
  4563. + info->icount.rx++;
  4564. + data_read = *((unsigned long *)&info->ioport[REG_DATA_STATUS32]);
  4565. + if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
  4566. + DEBUG_LOG(info->line, "ser_rx %c in loop\n", IO_EXTRACT(R_SERIAL0_READ, data_in, data_read));
  4567. + goto more_data;
  4568. + }
  4569. +
  4570. + tty_flip_buffer_push(info->port.tty);
  4571. + return info;
  4572. +}
  4573. +
  4574. +static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
  4575. +{
  4576. + unsigned char rstat;
  4577. +
  4578. +#ifdef SERIAL_DEBUG_INTR
  4579. + printk("Interrupt from serport %d\n", i);
  4580. +#endif
  4581. +/* DEBUG_LOG(info->line, "ser_interrupt stat %03X\n", rstat | (i << 8)); */
  4582. + if (!info->uses_dma_in) {
  4583. + return handle_ser_rx_interrupt_no_dma(info);
  4584. + }
  4585. + /* DMA is used */
  4586. + rstat = info->ioport[REG_STATUS];
  4587. + if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect) ) {
  4588. + DFLOW(DEBUG_LOG(info->line, "XOFF detect\n", 0));
  4589. + }
  4590. +
  4591. + if (rstat & SER_ERROR_MASK) {
  4592. + unsigned char data;
  4593. +
  4594. + info->last_rx_active_usec = GET_JIFFIES_USEC();
  4595. + info->last_rx_active = jiffies;
  4596. + /* If we got an error, we must reset it by reading the
  4597. + * data_in field
  4598. + */
  4599. + data = info->ioport[REG_DATA];
  4600. + DINTR1(DEBUG_LOG(info->line, "ser_rx! %c\n", data));
  4601. + DINTR1(DEBUG_LOG(info->line, "ser_rx err stat %02X\n", rstat));
  4602. + if (!data && (rstat & SER_FRAMING_ERR_MASK)) {
  4603. + /* Most likely a break, but we get interrupts over and
  4604. + * over again.
  4605. + */
  4606. +
  4607. + if (!info->break_detected_cnt) {
  4608. + DEBUG_LOG(info->line, "#BRK start\n", 0);
  4609. + }
  4610. + if (rstat & SER_RXD_MASK) {
  4611. + /* The RX pin is high now, so the break
  4612. + * must be over, but....
  4613. + * we can't really know if we will get another
  4614. + * last byte ending the break or not.
  4615. + * And we don't know if the byte (if any) will
  4616. + * have an error or look valid.
  4617. + */
  4618. + DEBUG_LOG(info->line, "# BL BRK\n", 0);
  4619. + info->errorcode = ERRCODE_INSERT_BREAK;
  4620. + }
  4621. + info->break_detected_cnt++;
  4622. + } else {
  4623. + /* The error does not look like a break, but could be
  4624. + * the end of one
  4625. + */
  4626. + if (info->break_detected_cnt) {
  4627. + DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
  4628. + info->errorcode = ERRCODE_INSERT_BREAK;
  4629. + } else {
  4630. + if (info->errorcode == ERRCODE_INSERT_BREAK) {
  4631. + info->icount.brk++;
  4632. + add_char_and_flag(info, '\0', TTY_BREAK);
  4633. + }
  4634. +
  4635. + if (rstat & SER_PAR_ERR_MASK) {
  4636. + info->icount.parity++;
  4637. + add_char_and_flag(info, data, TTY_PARITY);
  4638. + } else if (rstat & SER_OVERRUN_MASK) {
  4639. + info->icount.overrun++;
  4640. + add_char_and_flag(info, data, TTY_OVERRUN);
  4641. + } else if (rstat & SER_FRAMING_ERR_MASK) {
  4642. + info->icount.frame++;
  4643. + add_char_and_flag(info, data, TTY_FRAME);
  4644. + }
  4645. +
  4646. + info->errorcode = 0;
  4647. + }
  4648. + info->break_detected_cnt = 0;
  4649. + DEBUG_LOG(info->line, "#iERR s d %04X\n",
  4650. + ((rstat & SER_ERROR_MASK) << 8) | data);
  4651. + }
  4652. + PROCSTAT(ser_stat[info->line].early_errors_cnt++);
  4653. + } else { /* It was a valid byte, now let the DMA do the rest */
  4654. + unsigned long curr_time_u = GET_JIFFIES_USEC();
  4655. + unsigned long curr_time = jiffies;
  4656. +
  4657. + if (info->break_detected_cnt) {
  4658. + /* Detect if this character is a new valid char or the
  4659. + * last char in a break sequence: If LSBits are 0 and
  4660. + * MSBits are high AND the time is close to the
  4661. + * previous interrupt we should discard it.
  4662. + */
  4663. + long elapsed_usec =
  4664. + (curr_time - info->last_rx_active) * (1000000/HZ) +
  4665. + curr_time_u - info->last_rx_active_usec;
  4666. + if (elapsed_usec < 2*info->char_time_usec) {
  4667. + DEBUG_LOG(info->line, "FBRK %i\n", info->line);
  4668. + /* Report as BREAK (error) and let
  4669. + * receive_chars_dma() handle it
  4670. + */
  4671. + info->errorcode = ERRCODE_SET_BREAK;
  4672. + } else {
  4673. + DEBUG_LOG(info->line, "Not end of BRK (V)%i\n", info->line);
  4674. + }
  4675. + DEBUG_LOG(info->line, "num brk %i\n", info->break_detected_cnt);
  4676. + }
  4677. +
  4678. +#ifdef SERIAL_DEBUG_INTR
  4679. + printk("** OK, disabling ser_interrupts\n");
  4680. +#endif
  4681. + e100_disable_serial_data_irq(info);
  4682. + DINTR2(DEBUG_LOG(info->line, "ser_rx OK %d\n", info->line));
  4683. + info->break_detected_cnt = 0;
  4684. +
  4685. + PROCSTAT(ser_stat[info->line].ser_ints_ok_cnt++);
  4686. + }
  4687. + /* Restarting the DMA never hurts */
  4688. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
  4689. + START_FLUSH_FAST_TIMER(info, "ser_int");
  4690. + return info;
  4691. +} /* handle_ser_rx_interrupt */
  4692. +
  4693. +static void handle_ser_tx_interrupt(struct e100_serial *info)
  4694. +{
  4695. + unsigned long flags;
  4696. +
  4697. + if (info->x_char) {
  4698. + unsigned char rstat;
  4699. + DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
  4700. + local_irq_save(flags);
  4701. + rstat = info->ioport[REG_STATUS];
  4702. + DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
  4703. +
  4704. + info->ioport[REG_TR_DATA] = info->x_char;
  4705. + info->icount.tx++;
  4706. + info->x_char = 0;
  4707. + /* We must enable since it is disabled in ser_interrupt */
  4708. + e100_enable_serial_tx_ready_irq(info);
  4709. + local_irq_restore(flags);
  4710. + return;
  4711. + }
  4712. + if (info->uses_dma_out) {
  4713. + unsigned char rstat;
  4714. + int i;
  4715. + /* We only use normal tx interrupt when sending x_char */
  4716. + DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
  4717. + local_irq_save(flags);
  4718. + rstat = info->ioport[REG_STATUS];
  4719. + DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
  4720. + e100_disable_serial_tx_ready_irq(info);
  4721. + if (info->port.tty->stopped)
  4722. + rs_stop(info->port.tty);
  4723. + /* Enable the DMA channel and tell it to continue */
  4724. + e100_enable_txdma_channel(info);
  4725. + /* Wait 12 cycles before doing the DMA command */
  4726. + for(i = 6; i > 0; i--)
  4727. + nop();
  4728. +
  4729. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
  4730. + local_irq_restore(flags);
  4731. + return;
  4732. + }
  4733. + /* Normal char-by-char interrupt */
  4734. + if (info->xmit.head == info->xmit.tail
  4735. + || info->port.tty->stopped
  4736. + || info->port.tty->hw_stopped) {
  4737. + DFLOW(DEBUG_LOG(info->line, "tx_int: stopped %i\n",
  4738. + info->port.tty->stopped));
  4739. + e100_disable_serial_tx_ready_irq(info);
  4740. + info->tr_running = 0;
  4741. + return;
  4742. + }
  4743. + DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
  4744. + /* Send a byte, rs485 timing is critical so turn off ints */
  4745. + local_irq_save(flags);
  4746. + info->ioport[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
  4747. + info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
  4748. + info->icount.tx++;
  4749. + if (info->xmit.head == info->xmit.tail) {
  4750. +#if defined(CONFIG_ETRAX_RS485) && defined(CONFIG_ETRAX_FAST_TIMER)
  4751. + if (info->rs485.flags & SER_RS485_ENABLED) {
  4752. + /* Set a short timer to toggle RTS */
  4753. + start_one_shot_timer(&fast_timers_rs485[info->line],
  4754. + rs485_toggle_rts_timer_function,
  4755. + (unsigned long)info,
  4756. + info->char_time_usec*2,
  4757. + "RS-485");
  4758. + }
  4759. +#endif /* RS485 */
  4760. + info->last_tx_active_usec = GET_JIFFIES_USEC();
  4761. + info->last_tx_active = jiffies;
  4762. + e100_disable_serial_tx_ready_irq(info);
  4763. + info->tr_running = 0;
  4764. + DFLOW(DEBUG_LOG(info->line, "tx_int: stop2\n", 0));
  4765. + } else {
  4766. + /* We must enable since it is disabled in ser_interrupt */
  4767. + e100_enable_serial_tx_ready_irq(info);
  4768. + }
  4769. + local_irq_restore(flags);
  4770. +
  4771. + if (CIRC_CNT(info->xmit.head,
  4772. + info->xmit.tail,
  4773. + SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
  4774. + rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
  4775. +
  4776. +} /* handle_ser_tx_interrupt */
  4777. +
  4778. +/* result of time measurements:
  4779. + * RX duration 54-60 us when doing something, otherwise 6-9 us
  4780. + * ser_int duration: just sending: 8-15 us normally, up to 73 us
  4781. + */
  4782. +static irqreturn_t
  4783. +ser_interrupt(int irq, void *dev_id)
  4784. +{
  4785. + static volatile int tx_started = 0;
  4786. + struct e100_serial *info;
  4787. + int i;
  4788. + unsigned long flags;
  4789. + unsigned long irq_mask1_rd;
  4790. + unsigned long data_mask = (1 << (8+2*0)); /* ser0 data_avail */
  4791. + int handled = 0;
  4792. + static volatile unsigned long reentered_ready_mask = 0;
  4793. +
  4794. + local_irq_save(flags);
  4795. + irq_mask1_rd = *R_IRQ_MASK1_RD;
  4796. + /* First handle all rx interrupts with ints disabled */
  4797. + info = rs_table;
  4798. + irq_mask1_rd &= e100_ser_int_mask;
  4799. + for (i = 0; i < NR_PORTS; i++) {
  4800. + /* Which line caused the data irq? */
  4801. + if (irq_mask1_rd & data_mask) {
  4802. + handled = 1;
  4803. + handle_ser_rx_interrupt(info);
  4804. + }
  4805. + info += 1;
  4806. + data_mask <<= 2;
  4807. + }
  4808. + /* Handle tx interrupts with interrupts enabled so we
  4809. + * can take care of new data interrupts while transmitting
  4810. + * We protect the tx part with the tx_started flag.
  4811. + * We disable the tr_ready interrupts we are about to handle and
  4812. + * unblock the serial interrupt so new serial interrupts may come.
  4813. + *
  4814. + * If we get a new interrupt:
  4815. + * - it migth be due to synchronous serial ports.
  4816. + * - serial irq will be blocked by general irq handler.
  4817. + * - async data will be handled above (sync will be ignored).
  4818. + * - tx_started flag will prevent us from trying to send again and
  4819. + * we will exit fast - no need to unblock serial irq.
  4820. + * - Next (sync) serial interrupt handler will be run with
  4821. + * disabled interrupt due to restore_flags() at end of function,
  4822. + * so sync handler will not be preempted or reentered.
  4823. + */
  4824. + if (!tx_started) {
  4825. + unsigned long ready_mask;
  4826. +
  4827. + tx_started = 1;
  4828. + /* Only the tr_ready interrupts left */
  4829. + irq_mask1_rd &= (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
  4830. + IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
  4831. + IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
  4832. + IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
  4833. + while (irq_mask1_rd) {
  4834. + /* Disable those we are about to handle */
  4835. + *R_IRQ_MASK1_CLR = irq_mask1_rd;
  4836. + /* Unblock the serial interrupt */
  4837. + *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
  4838. +
  4839. + local_irq_enable();
  4840. + ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
  4841. + info = rs_table;
  4842. + for (i = 0; i < NR_PORTS; i++) {
  4843. + /* Which line caused the ready irq? */
  4844. + if (irq_mask1_rd & ready_mask) {
  4845. + handled = 1;
  4846. + handle_ser_tx_interrupt(info);
  4847. + }
  4848. + info += 1;
  4849. + ready_mask <<= 2;
  4850. + }
  4851. + /* handle_ser_tx_interrupt enables tr_ready interrupts */
  4852. + local_irq_disable();
  4853. + /* Handle reentered TX interrupt */
  4854. + irq_mask1_rd = reentered_ready_mask;
  4855. + }
  4856. + local_irq_disable();
  4857. + tx_started = 0;
  4858. + } else {
  4859. + unsigned long ready_mask;
  4860. + ready_mask = irq_mask1_rd & (IO_MASK(R_IRQ_MASK1_RD, ser0_ready) |
  4861. + IO_MASK(R_IRQ_MASK1_RD, ser1_ready) |
  4862. + IO_MASK(R_IRQ_MASK1_RD, ser2_ready) |
  4863. + IO_MASK(R_IRQ_MASK1_RD, ser3_ready));
  4864. + if (ready_mask) {
  4865. + reentered_ready_mask |= ready_mask;
  4866. + /* Disable those we are about to handle */
  4867. + *R_IRQ_MASK1_CLR = ready_mask;
  4868. + DFLOW(DEBUG_LOG(SERIAL_DEBUG_LINE, "ser_int reentered with TX %X\n", ready_mask));
  4869. + }
  4870. + }
  4871. +
  4872. + local_irq_restore(flags);
  4873. + return IRQ_RETVAL(handled);
  4874. +} /* ser_interrupt */
  4875. +#endif
  4876. +
  4877. +/*
  4878. + * -------------------------------------------------------------------
  4879. + * Here ends the serial interrupt routines.
  4880. + * -------------------------------------------------------------------
  4881. + */
  4882. +
  4883. +/*
  4884. + * This routine is used to handle the "bottom half" processing for the
  4885. + * serial driver, known also the "software interrupt" processing.
  4886. + * This processing is done at the kernel interrupt level, after the
  4887. + * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
  4888. + * is where time-consuming activities which can not be done in the
  4889. + * interrupt driver proper are done; the interrupt driver schedules
  4890. + * them using rs_sched_event(), and they get done here.
  4891. + */
  4892. +static void
  4893. +do_softint(struct work_struct *work)
  4894. +{
  4895. + struct e100_serial *info;
  4896. + struct tty_struct *tty;
  4897. +
  4898. + info = container_of(work, struct e100_serial, work);
  4899. +
  4900. + tty = info->port.tty;
  4901. + if (!tty)
  4902. + return;
  4903. +
  4904. + if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
  4905. + tty_wakeup(tty);
  4906. +}
  4907. +
  4908. +static int
  4909. +startup(struct e100_serial * info)
  4910. +{
  4911. + unsigned long flags;
  4912. + unsigned long xmit_page;
  4913. + int i;
  4914. +
  4915. + xmit_page = get_zeroed_page(GFP_KERNEL);
  4916. + if (!xmit_page)
  4917. + return -ENOMEM;
  4918. +
  4919. + local_irq_save(flags);
  4920. +
  4921. + /* if it was already initialized, skip this */
  4922. +
  4923. + if (info->flags & ASYNC_INITIALIZED) {
  4924. + local_irq_restore(flags);
  4925. + free_page(xmit_page);
  4926. + return 0;
  4927. + }
  4928. +
  4929. + if (info->xmit.buf)
  4930. + free_page(xmit_page);
  4931. + else
  4932. + info->xmit.buf = (unsigned char *) xmit_page;
  4933. +
  4934. +#ifdef SERIAL_DEBUG_OPEN
  4935. + printk("starting up ttyS%d (xmit_buf 0x%p)...\n", info->line, info->xmit.buf);
  4936. +#endif
  4937. +
  4938. +#ifdef CONFIG_SVINTO_SIM
  4939. + /* Bits and pieces collected from below. Better to have them
  4940. + in one ifdef:ed clause than to mix in a lot of ifdefs,
  4941. + right? */
  4942. + if (info->port.tty)
  4943. + clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
  4944. +
  4945. + info->xmit.head = info->xmit.tail = 0;
  4946. + info->first_recv_buffer = info->last_recv_buffer = NULL;
  4947. + info->recv_cnt = info->max_recv_cnt = 0;
  4948. +
  4949. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
  4950. + info->rec_descr[i].buf = NULL;
  4951. +
  4952. + /* No real action in the simulator, but may set info important
  4953. + to ioctl. */
  4954. + change_speed(info);
  4955. +#else
  4956. +
  4957. + /*
  4958. + * Clear the FIFO buffers and disable them
  4959. + * (they will be reenabled in change_speed())
  4960. + */
  4961. +
  4962. + /*
  4963. + * Reset the DMA channels and make sure their interrupts are cleared
  4964. + */
  4965. +
  4966. + if (info->dma_in_enabled) {
  4967. + info->uses_dma_in = 1;
  4968. + e100_enable_rxdma_channel(info);
  4969. +
  4970. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  4971. +
  4972. + /* Wait until reset cycle is complete */
  4973. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->icmdadr) ==
  4974. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  4975. +
  4976. + /* Make sure the irqs are cleared */
  4977. + *info->iclrintradr =
  4978. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  4979. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  4980. + } else {
  4981. + e100_disable_rxdma_channel(info);
  4982. + }
  4983. +
  4984. + if (info->dma_out_enabled) {
  4985. + info->uses_dma_out = 1;
  4986. + e100_enable_txdma_channel(info);
  4987. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  4988. +
  4989. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) ==
  4990. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  4991. +
  4992. + /* Make sure the irqs are cleared */
  4993. + *info->oclrintradr =
  4994. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_descr, do) |
  4995. + IO_STATE(R_DMA_CH6_CLR_INTR, clr_eop, do);
  4996. + } else {
  4997. + e100_disable_txdma_channel(info);
  4998. + }
  4999. +
  5000. + if (info->port.tty)
  5001. + clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
  5002. +
  5003. + info->xmit.head = info->xmit.tail = 0;
  5004. + info->first_recv_buffer = info->last_recv_buffer = NULL;
  5005. + info->recv_cnt = info->max_recv_cnt = 0;
  5006. +
  5007. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
  5008. + info->rec_descr[i].buf = 0;
  5009. +
  5010. + /*
  5011. + * and set the speed and other flags of the serial port
  5012. + * this will start the rx/tx as well
  5013. + */
  5014. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  5015. + e100_enable_serial_data_irq(info);
  5016. +#endif
  5017. + change_speed(info);
  5018. +
  5019. + /* dummy read to reset any serial errors */
  5020. +
  5021. + (void)info->ioport[REG_DATA];
  5022. +
  5023. + /* enable the interrupts */
  5024. + if (info->uses_dma_out)
  5025. + e100_enable_txdma_irq(info);
  5026. +
  5027. + e100_enable_rx_irq(info);
  5028. +
  5029. + info->tr_running = 0; /* to be sure we don't lock up the transmitter */
  5030. +
  5031. + /* setup the dma input descriptor and start dma */
  5032. +
  5033. + start_receive(info);
  5034. +
  5035. + /* for safety, make sure the descriptors last result is 0 bytes written */
  5036. +
  5037. + info->tr_descr.sw_len = 0;
  5038. + info->tr_descr.hw_len = 0;
  5039. + info->tr_descr.status = 0;
  5040. +
  5041. + /* enable RTS/DTR last */
  5042. +
  5043. + e100_rts(info, 1);
  5044. + e100_dtr(info, 1);
  5045. +
  5046. +#endif /* CONFIG_SVINTO_SIM */
  5047. +
  5048. + info->flags |= ASYNC_INITIALIZED;
  5049. +
  5050. + local_irq_restore(flags);
  5051. + return 0;
  5052. +}
  5053. +
  5054. +/*
  5055. + * This routine will shutdown a serial port; interrupts are disabled, and
  5056. + * DTR is dropped if the hangup on close termio flag is on.
  5057. + */
  5058. +static void
  5059. +shutdown(struct e100_serial * info)
  5060. +{
  5061. + unsigned long flags;
  5062. + struct etrax_dma_descr *descr = info->rec_descr;
  5063. + struct etrax_recv_buffer *buffer;
  5064. + int i;
  5065. +
  5066. +#ifndef CONFIG_SVINTO_SIM
  5067. + /* shut down the transmitter and receiver */
  5068. + DFLOW(DEBUG_LOG(info->line, "shutdown %i\n", info->line));
  5069. + e100_disable_rx(info);
  5070. + info->ioport[REG_TR_CTRL] = (info->tx_ctrl &= ~0x40);
  5071. +
  5072. + /* disable interrupts, reset dma channels */
  5073. + if (info->uses_dma_in) {
  5074. + e100_disable_rxdma_irq(info);
  5075. + *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  5076. + info->uses_dma_in = 0;
  5077. + } else {
  5078. + e100_disable_serial_data_irq(info);
  5079. + }
  5080. +
  5081. + if (info->uses_dma_out) {
  5082. + e100_disable_txdma_irq(info);
  5083. + info->tr_running = 0;
  5084. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  5085. + info->uses_dma_out = 0;
  5086. + } else {
  5087. + e100_disable_serial_tx_ready_irq(info);
  5088. + info->tr_running = 0;
  5089. + }
  5090. +
  5091. +#endif /* CONFIG_SVINTO_SIM */
  5092. +
  5093. + if (!(info->flags & ASYNC_INITIALIZED))
  5094. + return;
  5095. +
  5096. +#ifdef SERIAL_DEBUG_OPEN
  5097. + printk("Shutting down serial port %d (irq %d)....\n", info->line,
  5098. + info->irq);
  5099. +#endif
  5100. +
  5101. + local_irq_save(flags);
  5102. +
  5103. + if (info->xmit.buf) {
  5104. + free_page((unsigned long)info->xmit.buf);
  5105. + info->xmit.buf = NULL;
  5106. + }
  5107. +
  5108. + for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++)
  5109. + if (descr[i].buf) {
  5110. + buffer = phys_to_virt(descr[i].buf) - sizeof *buffer;
  5111. + kfree(buffer);
  5112. + descr[i].buf = 0;
  5113. + }
  5114. +
  5115. + if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) {
  5116. + /* hang up DTR and RTS if HUPCL is enabled */
  5117. + e100_dtr(info, 0);
  5118. + e100_rts(info, 0); /* could check CRTSCTS before doing this */
  5119. + }
  5120. +
  5121. + if (info->port.tty)
  5122. + set_bit(TTY_IO_ERROR, &info->port.tty->flags);
  5123. +
  5124. + info->flags &= ~ASYNC_INITIALIZED;
  5125. + local_irq_restore(flags);
  5126. +}
  5127. +
  5128. +
  5129. +/* change baud rate and other assorted parameters */
  5130. +
  5131. +static void
  5132. +change_speed(struct e100_serial *info)
  5133. +{
  5134. + unsigned int cflag;
  5135. + unsigned long xoff;
  5136. + unsigned long flags;
  5137. + /* first some safety checks */
  5138. +
  5139. + if (!info->port.tty || !info->port.tty->termios)
  5140. + return;
  5141. + if (!info->ioport)
  5142. + return;
  5143. +
  5144. + cflag = info->port.tty->termios->c_cflag;
  5145. +
  5146. + /* possibly, the tx/rx should be disabled first to do this safely */
  5147. +
  5148. + /* change baud-rate and write it to the hardware */
  5149. + if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) {
  5150. + /* Special baudrate */
  5151. + u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
  5152. + unsigned long alt_source =
  5153. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
  5154. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
  5155. + /* R_ALT_SER_BAUDRATE selects the source */
  5156. + DBAUD(printk("Custom baudrate: baud_base/divisor %lu/%i\n",
  5157. + (unsigned long)info->baud_base, info->custom_divisor));
  5158. + if (info->baud_base == SERIAL_PRESCALE_BASE) {
  5159. + /* 0, 2-65535 (0=65536) */
  5160. + u16 divisor = info->custom_divisor;
  5161. + /* R_SERIAL_PRESCALE (upper 16 bits of R_CLOCK_PRESCALE) */
  5162. + /* baudrate is 3.125MHz/custom_divisor */
  5163. + alt_source =
  5164. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, prescale) |
  5165. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, prescale);
  5166. + alt_source = 0x11;
  5167. + DBAUD(printk("Writing SERIAL_PRESCALE: divisor %i\n", divisor));
  5168. + *R_SERIAL_PRESCALE = divisor;
  5169. + info->baud = SERIAL_PRESCALE_BASE/divisor;
  5170. + }
  5171. +#ifdef CONFIG_ETRAX_EXTERN_PB6CLK_ENABLED
  5172. + else if ((info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8 &&
  5173. + info->custom_divisor == 1) ||
  5174. + (info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ &&
  5175. + info->custom_divisor == 8)) {
  5176. + /* ext_clk selected */
  5177. + alt_source =
  5178. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, extern) |
  5179. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, extern);
  5180. + DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
  5181. + info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
  5182. + }
  5183. +#endif
  5184. + else
  5185. + {
  5186. + /* Bad baudbase, we don't support using timer0
  5187. + * for baudrate.
  5188. + */
  5189. + printk(KERN_WARNING "Bad baud_base/custom_divisor: %lu/%i\n",
  5190. + (unsigned long)info->baud_base, info->custom_divisor);
  5191. + }
  5192. + r_alt_ser_baudrate_shadow &= ~mask;
  5193. + r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
  5194. + *R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
  5195. + } else {
  5196. + /* Normal baudrate */
  5197. + /* Make sure we use normal baudrate */
  5198. + u32 mask = 0xFF << (info->line*8); /* Each port has 8 bits */
  5199. + unsigned long alt_source =
  5200. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, normal) |
  5201. + IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, normal);
  5202. + r_alt_ser_baudrate_shadow &= ~mask;
  5203. + r_alt_ser_baudrate_shadow |= (alt_source << (info->line*8));
  5204. +#ifndef CONFIG_SVINTO_SIM
  5205. + *R_ALT_SER_BAUDRATE = r_alt_ser_baudrate_shadow;
  5206. +#endif /* CONFIG_SVINTO_SIM */
  5207. +
  5208. + info->baud = cflag_to_baud(cflag);
  5209. +#ifndef CONFIG_SVINTO_SIM
  5210. + info->ioport[REG_BAUD] = cflag_to_etrax_baud(cflag);
  5211. +#endif /* CONFIG_SVINTO_SIM */
  5212. + }
  5213. +
  5214. +#ifndef CONFIG_SVINTO_SIM
  5215. + /* start with default settings and then fill in changes */
  5216. + local_irq_save(flags);
  5217. + /* 8 bit, no/even parity */
  5218. + info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
  5219. + IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
  5220. + IO_MASK(R_SERIAL0_REC_CTRL, rec_par));
  5221. +
  5222. + /* 8 bit, no/even parity, 1 stop bit, no cts */
  5223. + info->tx_ctrl &= ~(IO_MASK(R_SERIAL0_TR_CTRL, tr_bitnr) |
  5224. + IO_MASK(R_SERIAL0_TR_CTRL, tr_par_en) |
  5225. + IO_MASK(R_SERIAL0_TR_CTRL, tr_par) |
  5226. + IO_MASK(R_SERIAL0_TR_CTRL, stop_bits) |
  5227. + IO_MASK(R_SERIAL0_TR_CTRL, auto_cts));
  5228. +
  5229. + if ((cflag & CSIZE) == CS7) {
  5230. + /* set 7 bit mode */
  5231. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
  5232. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
  5233. + }
  5234. +
  5235. + if (cflag & CSTOPB) {
  5236. + /* set 2 stop bit mode */
  5237. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, two_bits);
  5238. + }
  5239. +
  5240. + if (cflag & PARENB) {
  5241. + /* enable parity */
  5242. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
  5243. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
  5244. + }
  5245. +
  5246. + if (cflag & CMSPAR) {
  5247. + /* enable stick parity, PARODD mean Mark which matches ETRAX */
  5248. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, stick);
  5249. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, stick);
  5250. + }
  5251. + if (cflag & PARODD) {
  5252. + /* set odd parity (or Mark if CMSPAR) */
  5253. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd);
  5254. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd);
  5255. + }
  5256. +
  5257. + if (cflag & CRTSCTS) {
  5258. + /* enable automatic CTS handling */
  5259. + DFLOW(DEBUG_LOG(info->line, "FLOW auto_cts enabled\n", 0));
  5260. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, active);
  5261. + }
  5262. +
  5263. + /* make sure the tx and rx are enabled */
  5264. +
  5265. + info->tx_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable);
  5266. + info->rx_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable);
  5267. +
  5268. + /* actually write the control regs to the hardware */
  5269. +
  5270. + info->ioport[REG_TR_CTRL] = info->tx_ctrl;
  5271. + info->ioport[REG_REC_CTRL] = info->rx_ctrl;
  5272. + xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(info->port.tty));
  5273. + xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
  5274. + if (info->port.tty->termios->c_iflag & IXON ) {
  5275. + DFLOW(DEBUG_LOG(info->line, "FLOW XOFF enabled 0x%02X\n",
  5276. + STOP_CHAR(info->port.tty)));
  5277. + xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
  5278. + }
  5279. +
  5280. + *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
  5281. + local_irq_restore(flags);
  5282. +#endif /* !CONFIG_SVINTO_SIM */
  5283. +
  5284. + update_char_time(info);
  5285. +
  5286. +} /* change_speed */
  5287. +
  5288. +/* start transmitting chars NOW */
  5289. +
  5290. +static void
  5291. +rs_flush_chars(struct tty_struct *tty)
  5292. +{
  5293. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5294. + unsigned long flags;
  5295. +
  5296. + if (info->tr_running ||
  5297. + info->xmit.head == info->xmit.tail ||
  5298. + tty->stopped ||
  5299. + tty->hw_stopped ||
  5300. + !info->xmit.buf)
  5301. + return;
  5302. +
  5303. +#ifdef SERIAL_DEBUG_FLOW
  5304. + printk("rs_flush_chars\n");
  5305. +#endif
  5306. +
  5307. + /* this protection might not exactly be necessary here */
  5308. +
  5309. + local_irq_save(flags);
  5310. + start_transmit(info);
  5311. + local_irq_restore(flags);
  5312. +}
  5313. +
  5314. +static int rs_raw_write(struct tty_struct *tty,
  5315. + const unsigned char *buf, int count)
  5316. +{
  5317. + int c, ret = 0;
  5318. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5319. + unsigned long flags;
  5320. +
  5321. + /* first some sanity checks */
  5322. +
  5323. + if (!tty || !info->xmit.buf || !tmp_buf)
  5324. + return 0;
  5325. +
  5326. +#ifdef SERIAL_DEBUG_DATA
  5327. + if (info->line == SERIAL_DEBUG_LINE)
  5328. + printk("rs_raw_write (%d), status %d\n",
  5329. + count, info->ioport[REG_STATUS]);
  5330. +#endif
  5331. +
  5332. +#ifdef CONFIG_SVINTO_SIM
  5333. + /* Really simple. The output is here and now. */
  5334. + SIMCOUT(buf, count);
  5335. + return count;
  5336. +#endif
  5337. + local_save_flags(flags);
  5338. + DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
  5339. + DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
  5340. +
  5341. +
  5342. + /* The local_irq_disable/restore_flags pairs below are needed
  5343. + * because the DMA interrupt handler moves the info->xmit values.
  5344. + * the memcpy needs to be in the critical region unfortunately,
  5345. + * because we need to read xmit values, memcpy, write xmit values
  5346. + * in one atomic operation... this could perhaps be avoided by
  5347. + * more clever design.
  5348. + */
  5349. + local_irq_disable();
  5350. + while (count) {
  5351. + c = CIRC_SPACE_TO_END(info->xmit.head,
  5352. + info->xmit.tail,
  5353. + SERIAL_XMIT_SIZE);
  5354. +
  5355. + if (count < c)
  5356. + c = count;
  5357. + if (c <= 0)
  5358. + break;
  5359. +
  5360. + memcpy(info->xmit.buf + info->xmit.head, buf, c);
  5361. + info->xmit.head = (info->xmit.head + c) &
  5362. + (SERIAL_XMIT_SIZE-1);
  5363. + buf += c;
  5364. + count -= c;
  5365. + ret += c;
  5366. + }
  5367. + local_irq_restore(flags);
  5368. +
  5369. + /* enable transmitter if not running, unless the tty is stopped
  5370. + * this does not need IRQ protection since if tr_running == 0
  5371. + * the IRQ's are not running anyway for this port.
  5372. + */
  5373. + DFLOW(DEBUG_LOG(info->line, "write ret %i\n", ret));
  5374. +
  5375. + if (info->xmit.head != info->xmit.tail &&
  5376. + !tty->stopped &&
  5377. + !tty->hw_stopped &&
  5378. + !info->tr_running) {
  5379. + start_transmit(info);
  5380. + }
  5381. +
  5382. + return ret;
  5383. +} /* rs_raw_write() */
  5384. +
  5385. +static int
  5386. +rs_write(struct tty_struct *tty,
  5387. + const unsigned char *buf, int count)
  5388. +{
  5389. +#if defined(CONFIG_ETRAX_RS485)
  5390. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5391. +
  5392. + if (info->rs485.flags & SER_RS485_ENABLED)
  5393. + {
  5394. + /* If we are in RS-485 mode, we need to toggle RTS and disable
  5395. + * the receiver before initiating a DMA transfer
  5396. + */
  5397. +#ifdef CONFIG_ETRAX_FAST_TIMER
  5398. + /* Abort any started timer */
  5399. + fast_timers_rs485[info->line].function = NULL;
  5400. + del_fast_timer(&fast_timers_rs485[info->line]);
  5401. +#endif
  5402. + e100_rts(info, (info->rs485.flags & SER_RS485_RTS_ON_SEND));
  5403. +#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
  5404. + e100_disable_rx(info);
  5405. + e100_enable_rx_irq(info);
  5406. +#endif
  5407. +
  5408. + if (info->rs485.delay_rts_before_send > 0)
  5409. + msleep(info->rs485.delay_rts_before_send);
  5410. + }
  5411. +#endif /* CONFIG_ETRAX_RS485 */
  5412. +
  5413. + count = rs_raw_write(tty, buf, count);
  5414. +
  5415. +#if defined(CONFIG_ETRAX_RS485)
  5416. + if (info->rs485.flags & SER_RS485_ENABLED)
  5417. + {
  5418. + unsigned int val;
  5419. + /* If we are in RS-485 mode the following has to be done:
  5420. + * wait until DMA is ready
  5421. + * wait on transmit shift register
  5422. + * toggle RTS
  5423. + * enable the receiver
  5424. + */
  5425. +
  5426. + /* Sleep until all sent */
  5427. + tty_wait_until_sent(tty, 0);
  5428. +#ifdef CONFIG_ETRAX_FAST_TIMER
  5429. + /* Now sleep a little more so that shift register is empty */
  5430. + schedule_usleep(info->char_time_usec * 2);
  5431. +#endif
  5432. + /* wait on transmit shift register */
  5433. + do{
  5434. + get_lsr_info(info, &val);
  5435. + }while (!(val & TIOCSER_TEMT));
  5436. +
  5437. + e100_rts(info, (info->rs485.flags & SER_RS485_RTS_AFTER_SEND));
  5438. +
  5439. +#if defined(CONFIG_ETRAX_RS485_DISABLE_RECEIVER)
  5440. + e100_enable_rx(info);
  5441. + e100_enable_rxdma_irq(info);
  5442. +#endif
  5443. + }
  5444. +#endif /* CONFIG_ETRAX_RS485 */
  5445. +
  5446. + return count;
  5447. +} /* rs_write */
  5448. +
  5449. +
  5450. +/* how much space is available in the xmit buffer? */
  5451. +
  5452. +static int
  5453. +rs_write_room(struct tty_struct *tty)
  5454. +{
  5455. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5456. +
  5457. + return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  5458. +}
  5459. +
  5460. +/* How many chars are in the xmit buffer?
  5461. + * This does not include any chars in the transmitter FIFO.
  5462. + * Use wait_until_sent for waiting for FIFO drain.
  5463. + */
  5464. +
  5465. +static int
  5466. +rs_chars_in_buffer(struct tty_struct *tty)
  5467. +{
  5468. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5469. +
  5470. + return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  5471. +}
  5472. +
  5473. +/* discard everything in the xmit buffer */
  5474. +
  5475. +static void
  5476. +rs_flush_buffer(struct tty_struct *tty)
  5477. +{
  5478. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5479. + unsigned long flags;
  5480. +
  5481. + local_irq_save(flags);
  5482. + info->xmit.head = info->xmit.tail = 0;
  5483. + local_irq_restore(flags);
  5484. +
  5485. + tty_wakeup(tty);
  5486. +}
  5487. +
  5488. +/*
  5489. + * This function is used to send a high-priority XON/XOFF character to
  5490. + * the device
  5491. + *
  5492. + * Since we use DMA we don't check for info->x_char in transmit_chars_dma(),
  5493. + * but we do it in handle_ser_tx_interrupt().
  5494. + * We disable DMA channel and enable tx ready interrupt and write the
  5495. + * character when possible.
  5496. + */
  5497. +static void rs_send_xchar(struct tty_struct *tty, char ch)
  5498. +{
  5499. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5500. + unsigned long flags;
  5501. + local_irq_save(flags);
  5502. + if (info->uses_dma_out) {
  5503. + /* Put the DMA on hold and disable the channel */
  5504. + *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
  5505. + while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *info->ocmdadr) !=
  5506. + IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, hold));
  5507. + e100_disable_txdma_channel(info);
  5508. + }
  5509. +
  5510. + /* Must make sure transmitter is not stopped before we can transmit */
  5511. + if (tty->stopped)
  5512. + rs_start(tty);
  5513. +
  5514. + /* Enable manual transmit interrupt and send from there */
  5515. + DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
  5516. + info->x_char = ch;
  5517. + e100_enable_serial_tx_ready_irq(info);
  5518. + local_irq_restore(flags);
  5519. +}
  5520. +
  5521. +/*
  5522. + * ------------------------------------------------------------
  5523. + * rs_throttle()
  5524. + *
  5525. + * This routine is called by the upper-layer tty layer to signal that
  5526. + * incoming characters should be throttled.
  5527. + * ------------------------------------------------------------
  5528. + */
  5529. +static void
  5530. +rs_throttle(struct tty_struct * tty)
  5531. +{
  5532. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5533. +#ifdef SERIAL_DEBUG_THROTTLE
  5534. + char buf[64];
  5535. +
  5536. + printk("throttle %s: %lu....\n", tty_name(tty, buf),
  5537. + (unsigned long)tty->ldisc.chars_in_buffer(tty));
  5538. +#endif
  5539. + DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
  5540. +
  5541. + /* Do RTS before XOFF since XOFF might take some time */
  5542. + if (tty->termios->c_cflag & CRTSCTS) {
  5543. + /* Turn off RTS line */
  5544. + e100_rts(info, 0);
  5545. + }
  5546. + if (I_IXOFF(tty))
  5547. + rs_send_xchar(tty, STOP_CHAR(tty));
  5548. +
  5549. +}
  5550. +
  5551. +static void
  5552. +rs_unthrottle(struct tty_struct * tty)
  5553. +{
  5554. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5555. +#ifdef SERIAL_DEBUG_THROTTLE
  5556. + char buf[64];
  5557. +
  5558. + printk("unthrottle %s: %lu....\n", tty_name(tty, buf),
  5559. + (unsigned long)tty->ldisc.chars_in_buffer(tty));
  5560. +#endif
  5561. + DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
  5562. + DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n", tty->flip.count));
  5563. + /* Do RTS before XOFF since XOFF might take some time */
  5564. + if (tty->termios->c_cflag & CRTSCTS) {
  5565. + /* Assert RTS line */
  5566. + e100_rts(info, 1);
  5567. + }
  5568. +
  5569. + if (I_IXOFF(tty)) {
  5570. + if (info->x_char)
  5571. + info->x_char = 0;
  5572. + else
  5573. + rs_send_xchar(tty, START_CHAR(tty));
  5574. + }
  5575. +
  5576. +}
  5577. +
  5578. +/*
  5579. + * ------------------------------------------------------------
  5580. + * rs_ioctl() and friends
  5581. + * ------------------------------------------------------------
  5582. + */
  5583. +
  5584. +static int
  5585. +get_serial_info(struct e100_serial * info,
  5586. + struct serial_struct * retinfo)
  5587. +{
  5588. + struct serial_struct tmp;
  5589. +
  5590. + /* this is all probably wrong, there are a lot of fields
  5591. + * here that we don't have in e100_serial and maybe we
  5592. + * should set them to something else than 0.
  5593. + */
  5594. +
  5595. + if (!retinfo)
  5596. + return -EFAULT;
  5597. + memset(&tmp, 0, sizeof(tmp));
  5598. + tmp.type = info->type;
  5599. + tmp.line = info->line;
  5600. + tmp.port = (int)info->ioport;
  5601. + tmp.irq = info->irq;
  5602. + tmp.flags = info->flags;
  5603. + tmp.baud_base = info->baud_base;
  5604. + tmp.close_delay = info->close_delay;
  5605. + tmp.closing_wait = info->closing_wait;
  5606. + tmp.custom_divisor = info->custom_divisor;
  5607. + if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
  5608. + return -EFAULT;
  5609. + return 0;
  5610. +}
  5611. +
  5612. +static int
  5613. +set_serial_info(struct e100_serial *info,
  5614. + struct serial_struct *new_info)
  5615. +{
  5616. + struct serial_struct new_serial;
  5617. + struct e100_serial old_info;
  5618. + int retval = 0;
  5619. +
  5620. + if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
  5621. + return -EFAULT;
  5622. +
  5623. + old_info = *info;
  5624. +
  5625. + if (!capable(CAP_SYS_ADMIN)) {
  5626. + if ((new_serial.type != info->type) ||
  5627. + (new_serial.close_delay != info->close_delay) ||
  5628. + ((new_serial.flags & ~ASYNC_USR_MASK) !=
  5629. + (info->flags & ~ASYNC_USR_MASK)))
  5630. + return -EPERM;
  5631. + info->flags = ((info->flags & ~ASYNC_USR_MASK) |
  5632. + (new_serial.flags & ASYNC_USR_MASK));
  5633. + goto check_and_exit;
  5634. + }
  5635. +
  5636. + if (info->count > 1)
  5637. + return -EBUSY;
  5638. +
  5639. + /*
  5640. + * OK, past this point, all the error checking has been done.
  5641. + * At this point, we start making changes.....
  5642. + */
  5643. +
  5644. + info->baud_base = new_serial.baud_base;
  5645. + info->flags = ((info->flags & ~ASYNC_FLAGS) |
  5646. + (new_serial.flags & ASYNC_FLAGS));
  5647. + info->custom_divisor = new_serial.custom_divisor;
  5648. + info->type = new_serial.type;
  5649. + info->close_delay = new_serial.close_delay;
  5650. + info->closing_wait = new_serial.closing_wait;
  5651. + info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
  5652. +
  5653. + check_and_exit:
  5654. + if (info->flags & ASYNC_INITIALIZED) {
  5655. + change_speed(info);
  5656. + } else
  5657. + retval = startup(info);
  5658. + return retval;
  5659. +}
  5660. +
  5661. +/*
  5662. + * get_lsr_info - get line status register info
  5663. + *
  5664. + * Purpose: Let user call ioctl() to get info when the UART physically
  5665. + * is emptied. On bus types like RS485, the transmitter must
  5666. + * release the bus after transmitting. This must be done when
  5667. + * the transmit shift register is empty, not be done when the
  5668. + * transmit holding register is empty. This functionality
  5669. + * allows an RS485 driver to be written in user space.
  5670. + */
  5671. +static int
  5672. +get_lsr_info(struct e100_serial * info, unsigned int *value)
  5673. +{
  5674. + unsigned int result = TIOCSER_TEMT;
  5675. +#ifndef CONFIG_SVINTO_SIM
  5676. + unsigned long curr_time = jiffies;
  5677. + unsigned long curr_time_usec = GET_JIFFIES_USEC();
  5678. + unsigned long elapsed_usec =
  5679. + (curr_time - info->last_tx_active) * 1000000/HZ +
  5680. + curr_time_usec - info->last_tx_active_usec;
  5681. +
  5682. + if (info->xmit.head != info->xmit.tail ||
  5683. + elapsed_usec < 2*info->char_time_usec) {
  5684. + result = 0;
  5685. + }
  5686. +#endif
  5687. +
  5688. + if (copy_to_user(value, &result, sizeof(int)))
  5689. + return -EFAULT;
  5690. + return 0;
  5691. +}
  5692. +
  5693. +#ifdef SERIAL_DEBUG_IO
  5694. +struct state_str
  5695. +{
  5696. + int state;
  5697. + const char *str;
  5698. +};
  5699. +
  5700. +const struct state_str control_state_str[] = {
  5701. + {TIOCM_DTR, "DTR" },
  5702. + {TIOCM_RTS, "RTS"},
  5703. + {TIOCM_ST, "ST?" },
  5704. + {TIOCM_SR, "SR?" },
  5705. + {TIOCM_CTS, "CTS" },
  5706. + {TIOCM_CD, "CD" },
  5707. + {TIOCM_RI, "RI" },
  5708. + {TIOCM_DSR, "DSR" },
  5709. + {0, NULL }
  5710. +};
  5711. +
  5712. +char *get_control_state_str(int MLines, char *s)
  5713. +{
  5714. + int i = 0;
  5715. +
  5716. + s[0]='\0';
  5717. + while (control_state_str[i].str != NULL) {
  5718. + if (MLines & control_state_str[i].state) {
  5719. + if (s[0] != '\0') {
  5720. + strcat(s, ", ");
  5721. + }
  5722. + strcat(s, control_state_str[i].str);
  5723. + }
  5724. + i++;
  5725. + }
  5726. + return s;
  5727. +}
  5728. +#endif
  5729. +
  5730. +static int
  5731. +rs_break(struct tty_struct *tty, int break_state)
  5732. +{
  5733. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5734. + unsigned long flags;
  5735. +
  5736. + if (!info->ioport)
  5737. + return -EIO;
  5738. +
  5739. + local_irq_save(flags);
  5740. + if (break_state == -1) {
  5741. + /* Go to manual mode and set the txd pin to 0 */
  5742. + /* Clear bit 7 (txd) and 6 (tr_enable) */
  5743. + info->tx_ctrl &= 0x3F;
  5744. + } else {
  5745. + /* Set bit 7 (txd) and 6 (tr_enable) */
  5746. + info->tx_ctrl |= (0x80 | 0x40);
  5747. + }
  5748. + info->ioport[REG_TR_CTRL] = info->tx_ctrl;
  5749. + local_irq_restore(flags);
  5750. + return 0;
  5751. +}
  5752. +
  5753. +static int
  5754. +rs_tiocmset(struct tty_struct *tty, struct file *file,
  5755. + unsigned int set, unsigned int clear)
  5756. +{
  5757. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5758. + unsigned long flags;
  5759. +
  5760. + local_irq_save(flags);
  5761. +
  5762. + if (clear & TIOCM_RTS)
  5763. + e100_rts(info, 0);
  5764. + if (clear & TIOCM_DTR)
  5765. + e100_dtr(info, 0);
  5766. + /* Handle FEMALE behaviour */
  5767. + if (clear & TIOCM_RI)
  5768. + e100_ri_out(info, 0);
  5769. + if (clear & TIOCM_CD)
  5770. + e100_cd_out(info, 0);
  5771. +
  5772. + if (set & TIOCM_RTS)
  5773. + e100_rts(info, 1);
  5774. + if (set & TIOCM_DTR)
  5775. + e100_dtr(info, 1);
  5776. + /* Handle FEMALE behaviour */
  5777. + if (set & TIOCM_RI)
  5778. + e100_ri_out(info, 1);
  5779. + if (set & TIOCM_CD)
  5780. + e100_cd_out(info, 1);
  5781. +
  5782. + local_irq_restore(flags);
  5783. + return 0;
  5784. +}
  5785. +
  5786. +static int
  5787. +rs_tiocmget(struct tty_struct *tty, struct file *file)
  5788. +{
  5789. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5790. + unsigned int result;
  5791. + unsigned long flags;
  5792. +
  5793. + local_irq_save(flags);
  5794. +
  5795. + result =
  5796. + (!E100_RTS_GET(info) ? TIOCM_RTS : 0)
  5797. + | (!E100_DTR_GET(info) ? TIOCM_DTR : 0)
  5798. + | (!E100_RI_GET(info) ? TIOCM_RNG : 0)
  5799. + | (!E100_DSR_GET(info) ? TIOCM_DSR : 0)
  5800. + | (!E100_CD_GET(info) ? TIOCM_CAR : 0)
  5801. + | (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
  5802. +
  5803. + local_irq_restore(flags);
  5804. +
  5805. +#ifdef SERIAL_DEBUG_IO
  5806. + printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
  5807. + info->line, result, result);
  5808. + {
  5809. + char s[100];
  5810. +
  5811. + get_control_state_str(result, s);
  5812. + printk(KERN_DEBUG "state: %s\n", s);
  5813. + }
  5814. +#endif
  5815. + return result;
  5816. +
  5817. +}
  5818. +
  5819. +
  5820. +static int
  5821. +rs_ioctl(struct tty_struct *tty, struct file * file,
  5822. + unsigned int cmd, unsigned long arg)
  5823. +{
  5824. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  5825. +
  5826. + if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
  5827. + (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
  5828. + (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
  5829. + if (tty->flags & (1 << TTY_IO_ERROR))
  5830. + return -EIO;
  5831. + }
  5832. +
  5833. + switch (cmd) {
  5834. + case TIOCGSERIAL:
  5835. + return get_serial_info(info,
  5836. + (struct serial_struct *) arg);
  5837. + case TIOCSSERIAL:
  5838. + return set_serial_info(info,
  5839. + (struct serial_struct *) arg);
  5840. + case TIOCSERGETLSR: /* Get line status register */
  5841. + return get_lsr_info(info, (unsigned int *) arg);
  5842. +
  5843. + case TIOCSERGSTRUCT:
  5844. + if (copy_to_user((struct e100_serial *) arg,
  5845. + info, sizeof(struct e100_serial)))
  5846. + return -EFAULT;
  5847. + return 0;
  5848. +
  5849. +#if defined(CONFIG_ETRAX_RS485)
  5850. + case TIOCSERSETRS485:
  5851. + {
  5852. + /* In this ioctl we still use the old structure
  5853. + * rs485_control for backward compatibility
  5854. + * (if we use serial_rs485, then old user-level code
  5855. + * wouldn't work anymore...).
  5856. + * The use of this ioctl is deprecated: use TIOCSRS485
  5857. + * instead.*/
  5858. + struct rs485_control rs485ctrl;
  5859. + struct serial_rs485 rs485data;
  5860. + printk(KERN_DEBUG "The use of this ioctl is deprecated. Use TIOCSRS485 instead\n");
  5861. + if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
  5862. + sizeof(rs485ctrl)))
  5863. + return -EFAULT;
  5864. +
  5865. + rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
  5866. + rs485data.flags = 0;
  5867. + if (rs485ctrl.enabled)
  5868. + rs485data.flags |= SER_RS485_ENABLED;
  5869. + else
  5870. + rs485data.flags &= ~(SER_RS485_ENABLED);
  5871. +
  5872. + if (rs485ctrl.rts_on_send)
  5873. + rs485data.flags |= SER_RS485_RTS_ON_SEND;
  5874. + else
  5875. + rs485data.flags &= ~(SER_RS485_RTS_ON_SEND);
  5876. +
  5877. + if (rs485ctrl.rts_after_sent)
  5878. + rs485data.flags |= SER_RS485_RTS_AFTER_SEND;
  5879. + else
  5880. + rs485data.flags &= ~(SER_RS485_RTS_AFTER_SEND);
  5881. +
  5882. + return e100_enable_rs485(tty, &rs485data);
  5883. + }
  5884. +
  5885. + case TIOCSRS485:
  5886. + {
  5887. + /* This is the new version of TIOCSRS485, with new
  5888. + * data structure serial_rs485 */
  5889. + struct serial_rs485 rs485data;
  5890. + if (copy_from_user(&rs485data, (struct rs485_control *)arg,
  5891. + sizeof(rs485data)))
  5892. + return -EFAULT;
  5893. +
  5894. + return e100_enable_rs485(tty, &rs485data);
  5895. + }
  5896. +
  5897. +
  5898. + case TIOCSERWRRS485:
  5899. + {
  5900. + struct rs485_write rs485wr;
  5901. + if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
  5902. + sizeof(rs485wr)))
  5903. + return -EFAULT;
  5904. +
  5905. + return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
  5906. + }
  5907. +#endif
  5908. +
  5909. + default:
  5910. + return -ENOIOCTLCMD;
  5911. + }
  5912. + return 0;
  5913. +}
  5914. +
  5915. +static void
  5916. +rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
  5917. +{
  5918. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  5919. +
  5920. + change_speed(info);
  5921. +
  5922. + /* Handle turning off CRTSCTS */
  5923. + if ((old_termios->c_cflag & CRTSCTS) &&
  5924. + !(tty->termios->c_cflag & CRTSCTS)) {
  5925. + tty->hw_stopped = 0;
  5926. + rs_start(tty);
  5927. + }
  5928. +
  5929. +}
  5930. +
  5931. +/*
  5932. + * ------------------------------------------------------------
  5933. + * rs_close()
  5934. + *
  5935. + * This routine is called when the serial port gets closed. First, we
  5936. + * wait for the last remaining data to be sent. Then, we unlink its
  5937. + * S structure from the interrupt chain if necessary, and we free
  5938. + * that IRQ if nothing is left in the chain.
  5939. + * ------------------------------------------------------------
  5940. + */
  5941. +static void
  5942. +rs_close(struct tty_struct *tty, struct file * filp)
  5943. +{
  5944. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  5945. + unsigned long flags;
  5946. +
  5947. + if (!info)
  5948. + return;
  5949. +
  5950. + /* interrupts are disabled for this entire function */
  5951. +
  5952. + local_irq_save(flags);
  5953. +
  5954. + if (tty_hung_up_p(filp)) {
  5955. + local_irq_restore(flags);
  5956. + return;
  5957. + }
  5958. +
  5959. +#ifdef SERIAL_DEBUG_OPEN
  5960. + printk("[%d] rs_close ttyS%d, count = %d\n", current->pid,
  5961. + info->line, info->count);
  5962. +#endif
  5963. + if ((tty->count == 1) && (info->count != 1)) {
  5964. + /*
  5965. + * Uh, oh. tty->count is 1, which means that the tty
  5966. + * structure will be freed. Info->count should always
  5967. + * be one in these conditions. If it's greater than
  5968. + * one, we've got real problems, since it means the
  5969. + * serial port won't be shutdown.
  5970. + */
  5971. + printk(KERN_CRIT
  5972. + "rs_close: bad serial port count; tty->count is 1, "
  5973. + "info->count is %d\n", info->count);
  5974. + info->count = 1;
  5975. + }
  5976. + if (--info->count < 0) {
  5977. + printk(KERN_CRIT "rs_close: bad serial port count for ttyS%d: %d\n",
  5978. + info->line, info->count);
  5979. + info->count = 0;
  5980. + }
  5981. + if (info->count) {
  5982. + local_irq_restore(flags);
  5983. + return;
  5984. + }
  5985. + info->flags |= ASYNC_CLOSING;
  5986. + /*
  5987. + * Save the termios structure, since this port may have
  5988. + * separate termios for callout and dialin.
  5989. + */
  5990. + if (info->flags & ASYNC_NORMAL_ACTIVE)
  5991. + info->normal_termios = *tty->termios;
  5992. + /*
  5993. + * Now we wait for the transmit buffer to clear; and we notify
  5994. + * the line discipline to only process XON/XOFF characters.
  5995. + */
  5996. + tty->closing = 1;
  5997. + if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
  5998. + tty_wait_until_sent(tty, info->closing_wait);
  5999. + /*
  6000. + * At this point we stop accepting input. To do this, we
  6001. + * disable the serial receiver and the DMA receive interrupt.
  6002. + */
  6003. +#ifdef SERIAL_HANDLE_EARLY_ERRORS
  6004. + e100_disable_serial_data_irq(info);
  6005. +#endif
  6006. +
  6007. +#ifndef CONFIG_SVINTO_SIM
  6008. + e100_disable_rx(info);
  6009. + e100_disable_rx_irq(info);
  6010. +
  6011. + if (info->flags & ASYNC_INITIALIZED) {
  6012. + /*
  6013. + * Before we drop DTR, make sure the UART transmitter
  6014. + * has completely drained; this is especially
  6015. + * important as we have a transmit FIFO!
  6016. + */
  6017. + rs_wait_until_sent(tty, HZ);
  6018. + }
  6019. +#endif
  6020. +
  6021. + shutdown(info);
  6022. + rs_flush_buffer(tty);
  6023. + tty_ldisc_flush(tty);
  6024. + tty->closing = 0;
  6025. + info->event = 0;
  6026. + info->port.tty = NULL;
  6027. + if (info->blocked_open) {
  6028. + if (info->close_delay)
  6029. + schedule_timeout_interruptible(info->close_delay);
  6030. + wake_up_interruptible(&info->open_wait);
  6031. + }
  6032. + info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
  6033. + wake_up_interruptible(&info->close_wait);
  6034. + local_irq_restore(flags);
  6035. +
  6036. + /* port closed */
  6037. +
  6038. +#if defined(CONFIG_ETRAX_RS485)
  6039. + if (info->rs485.flags & SER_RS485_ENABLED) {
  6040. + info->rs485.flags &= ~(SER_RS485_ENABLED);
  6041. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  6042. + *R_PORT_PA_DATA = port_pa_data_shadow &= ~(1 << rs485_pa_bit);
  6043. +#endif
  6044. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  6045. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  6046. + rs485_port_g_bit, 0);
  6047. +#endif
  6048. +#if defined(CONFIG_ETRAX_RS485_LTC1387)
  6049. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  6050. + CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 0);
  6051. + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
  6052. + CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 0);
  6053. +#endif
  6054. + }
  6055. +#endif
  6056. +
  6057. + /*
  6058. + * Release any allocated DMA irq's.
  6059. + */
  6060. + if (info->dma_in_enabled) {
  6061. + free_irq(info->dma_in_irq_nbr, info);
  6062. + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
  6063. + info->uses_dma_in = 0;
  6064. +#ifdef SERIAL_DEBUG_OPEN
  6065. + printk(KERN_DEBUG "DMA irq '%s' freed\n",
  6066. + info->dma_in_irq_description);
  6067. +#endif
  6068. + }
  6069. + if (info->dma_out_enabled) {
  6070. + free_irq(info->dma_out_irq_nbr, info);
  6071. + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
  6072. + info->uses_dma_out = 0;
  6073. +#ifdef SERIAL_DEBUG_OPEN
  6074. + printk(KERN_DEBUG "DMA irq '%s' freed\n",
  6075. + info->dma_out_irq_description);
  6076. +#endif
  6077. + }
  6078. +}
  6079. +
  6080. +/*
  6081. + * rs_wait_until_sent() --- wait until the transmitter is empty
  6082. + */
  6083. +static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
  6084. +{
  6085. + unsigned long orig_jiffies;
  6086. + struct e100_serial *info = (struct e100_serial *)tty->driver_data;
  6087. + unsigned long curr_time = jiffies;
  6088. + unsigned long curr_time_usec = GET_JIFFIES_USEC();
  6089. + long elapsed_usec =
  6090. + (curr_time - info->last_tx_active) * (1000000/HZ) +
  6091. + curr_time_usec - info->last_tx_active_usec;
  6092. +
  6093. + /*
  6094. + * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
  6095. + * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
  6096. + */
  6097. + lock_kernel();
  6098. + orig_jiffies = jiffies;
  6099. + while (info->xmit.head != info->xmit.tail || /* More in send queue */
  6100. + (*info->ostatusadr & 0x007f) || /* more in FIFO */
  6101. + (elapsed_usec < 2*info->char_time_usec)) {
  6102. + schedule_timeout_interruptible(1);
  6103. + if (signal_pending(current))
  6104. + break;
  6105. + if (timeout && time_after(jiffies, orig_jiffies + timeout))
  6106. + break;
  6107. + curr_time = jiffies;
  6108. + curr_time_usec = GET_JIFFIES_USEC();
  6109. + elapsed_usec =
  6110. + (curr_time - info->last_tx_active) * (1000000/HZ) +
  6111. + curr_time_usec - info->last_tx_active_usec;
  6112. + }
  6113. + set_current_state(TASK_RUNNING);
  6114. + unlock_kernel();
  6115. +}
  6116. +
  6117. +/*
  6118. + * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
  6119. + */
  6120. +void
  6121. +rs_hangup(struct tty_struct *tty)
  6122. +{
  6123. + struct e100_serial * info = (struct e100_serial *)tty->driver_data;
  6124. +
  6125. + rs_flush_buffer(tty);
  6126. + shutdown(info);
  6127. + info->event = 0;
  6128. + info->count = 0;
  6129. + info->flags &= ~ASYNC_NORMAL_ACTIVE;
  6130. + info->port.tty = NULL;
  6131. + wake_up_interruptible(&info->open_wait);
  6132. +}
  6133. +
  6134. +/*
  6135. + * ------------------------------------------------------------
  6136. + * rs_open() and friends
  6137. + * ------------------------------------------------------------
  6138. + */
  6139. +static int
  6140. +block_til_ready(struct tty_struct *tty, struct file * filp,
  6141. + struct e100_serial *info)
  6142. +{
  6143. + DECLARE_WAITQUEUE(wait, current);
  6144. + unsigned long flags;
  6145. + int retval;
  6146. + int do_clocal = 0, extra_count = 0;
  6147. +
  6148. + /*
  6149. + * If the device is in the middle of being closed, then block
  6150. + * until it's done, and then try again.
  6151. + */
  6152. + if (tty_hung_up_p(filp) ||
  6153. + (info->flags & ASYNC_CLOSING)) {
  6154. + wait_event_interruptible(info->close_wait,
  6155. + !(info->flags & ASYNC_CLOSING));
  6156. +#ifdef SERIAL_DO_RESTART
  6157. + if (info->flags & ASYNC_HUP_NOTIFY)
  6158. + return -EAGAIN;
  6159. + else
  6160. + return -ERESTARTSYS;
  6161. +#else
  6162. + return -EAGAIN;
  6163. +#endif
  6164. + }
  6165. +
  6166. + /*
  6167. + * If non-blocking mode is set, or the port is not enabled,
  6168. + * then make the check up front and then exit.
  6169. + */
  6170. + if ((filp->f_flags & O_NONBLOCK) ||
  6171. + (tty->flags & (1 << TTY_IO_ERROR))) {
  6172. + info->flags |= ASYNC_NORMAL_ACTIVE;
  6173. + return 0;
  6174. + }
  6175. +
  6176. + if (tty->termios->c_cflag & CLOCAL) {
  6177. + do_clocal = 1;
  6178. + }
  6179. +
  6180. + /*
  6181. + * Block waiting for the carrier detect and the line to become
  6182. + * free (i.e., not in use by the callout). While we are in
  6183. + * this loop, info->count is dropped by one, so that
  6184. + * rs_close() knows when to free things. We restore it upon
  6185. + * exit, either normal or abnormal.
  6186. + */
  6187. + retval = 0;
  6188. + add_wait_queue(&info->open_wait, &wait);
  6189. +#ifdef SERIAL_DEBUG_OPEN
  6190. + printk("block_til_ready before block: ttyS%d, count = %d\n",
  6191. + info->line, info->count);
  6192. +#endif
  6193. + local_irq_save(flags);
  6194. + if (!tty_hung_up_p(filp)) {
  6195. + extra_count++;
  6196. + info->count--;
  6197. + }
  6198. + local_irq_restore(flags);
  6199. + info->blocked_open++;
  6200. + while (1) {
  6201. + local_irq_save(flags);
  6202. + /* assert RTS and DTR */
  6203. + e100_rts(info, 1);
  6204. + e100_dtr(info, 1);
  6205. + local_irq_restore(flags);
  6206. + set_current_state(TASK_INTERRUPTIBLE);
  6207. + if (tty_hung_up_p(filp) ||
  6208. + !(info->flags & ASYNC_INITIALIZED)) {
  6209. +#ifdef SERIAL_DO_RESTART
  6210. + if (info->flags & ASYNC_HUP_NOTIFY)
  6211. + retval = -EAGAIN;
  6212. + else
  6213. + retval = -ERESTARTSYS;
  6214. +#else
  6215. + retval = -EAGAIN;
  6216. +#endif
  6217. + break;
  6218. + }
  6219. + if (!(info->flags & ASYNC_CLOSING) && do_clocal)
  6220. + /* && (do_clocal || DCD_IS_ASSERTED) */
  6221. + break;
  6222. + if (signal_pending(current)) {
  6223. + retval = -ERESTARTSYS;
  6224. + break;
  6225. + }
  6226. +#ifdef SERIAL_DEBUG_OPEN
  6227. + printk("block_til_ready blocking: ttyS%d, count = %d\n",
  6228. + info->line, info->count);
  6229. +#endif
  6230. + schedule();
  6231. + }
  6232. + set_current_state(TASK_RUNNING);
  6233. + remove_wait_queue(&info->open_wait, &wait);
  6234. + if (extra_count)
  6235. + info->count++;
  6236. + info->blocked_open--;
  6237. +#ifdef SERIAL_DEBUG_OPEN
  6238. + printk("block_til_ready after blocking: ttyS%d, count = %d\n",
  6239. + info->line, info->count);
  6240. +#endif
  6241. + if (retval)
  6242. + return retval;
  6243. + info->flags |= ASYNC_NORMAL_ACTIVE;
  6244. + return 0;
  6245. +}
  6246. +
  6247. +static void
  6248. +deinit_port(struct e100_serial *info)
  6249. +{
  6250. + if (info->dma_out_enabled) {
  6251. + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
  6252. + free_irq(info->dma_out_irq_nbr, info);
  6253. + }
  6254. + if (info->dma_in_enabled) {
  6255. + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
  6256. + free_irq(info->dma_in_irq_nbr, info);
  6257. + }
  6258. +}
  6259. +
  6260. +/*
  6261. + * This routine is called whenever a serial port is opened.
  6262. + * It performs the serial-specific initialization for the tty structure.
  6263. + */
  6264. +static int
  6265. +rs_open(struct tty_struct *tty, struct file * filp)
  6266. +{
  6267. + struct e100_serial *info;
  6268. + int retval, line;
  6269. + unsigned long page;
  6270. + int allocated_resources = 0;
  6271. +
  6272. + /* find which port we want to open */
  6273. + line = tty->index;
  6274. +
  6275. + if (line < 0 || line >= NR_PORTS)
  6276. + return -ENODEV;
  6277. +
  6278. + /* find the corresponding e100_serial struct in the table */
  6279. + info = rs_table + line;
  6280. +
  6281. + /* don't allow the opening of ports that are not enabled in the HW config */
  6282. + if (!info->enabled)
  6283. + return -ENODEV;
  6284. +
  6285. +#ifdef SERIAL_DEBUG_OPEN
  6286. + printk("[%d] rs_open %s, count = %d\n", current->pid, tty->name,
  6287. + info->count);
  6288. +#endif
  6289. +
  6290. + info->count++;
  6291. + tty->driver_data = info;
  6292. + info->port.tty = tty;
  6293. +
  6294. + info->port.tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
  6295. +
  6296. + if (!tmp_buf) {
  6297. + page = get_zeroed_page(GFP_KERNEL);
  6298. + if (!page) {
  6299. + return -ENOMEM;
  6300. + }
  6301. + if (tmp_buf)
  6302. + free_page(page);
  6303. + else
  6304. + tmp_buf = (unsigned char *) page;
  6305. + }
  6306. +
  6307. + /*
  6308. + * If the port is in the middle of closing, bail out now
  6309. + */
  6310. + if (tty_hung_up_p(filp) ||
  6311. + (info->flags & ASYNC_CLOSING)) {
  6312. + wait_event_interruptible(info->close_wait,
  6313. + !(info->flags & ASYNC_CLOSING));
  6314. +#ifdef SERIAL_DO_RESTART
  6315. + return ((info->flags & ASYNC_HUP_NOTIFY) ?
  6316. + -EAGAIN : -ERESTARTSYS);
  6317. +#else
  6318. + return -EAGAIN;
  6319. +#endif
  6320. + }
  6321. +
  6322. + /*
  6323. + * If DMA is enabled try to allocate the irq's.
  6324. + */
  6325. + if (info->count == 1) {
  6326. + allocated_resources = 1;
  6327. + if (info->dma_in_enabled) {
  6328. + if (request_irq(info->dma_in_irq_nbr,
  6329. + rec_interrupt,
  6330. + info->dma_in_irq_flags,
  6331. + info->dma_in_irq_description,
  6332. + info)) {
  6333. + printk(KERN_WARNING "DMA irq '%s' busy; "
  6334. + "falling back to non-DMA mode\n",
  6335. + info->dma_in_irq_description);
  6336. + /* Make sure we never try to use DMA in */
  6337. + /* for the port again. */
  6338. + info->dma_in_enabled = 0;
  6339. + } else if (cris_request_dma(info->dma_in_nbr,
  6340. + info->dma_in_irq_description,
  6341. + DMA_VERBOSE_ON_ERROR,
  6342. + info->dma_owner)) {
  6343. + free_irq(info->dma_in_irq_nbr, info);
  6344. + printk(KERN_WARNING "DMA '%s' busy; "
  6345. + "falling back to non-DMA mode\n",
  6346. + info->dma_in_irq_description);
  6347. + /* Make sure we never try to use DMA in */
  6348. + /* for the port again. */
  6349. + info->dma_in_enabled = 0;
  6350. + }
  6351. +#ifdef SERIAL_DEBUG_OPEN
  6352. + else
  6353. + printk(KERN_DEBUG "DMA irq '%s' allocated\n",
  6354. + info->dma_in_irq_description);
  6355. +#endif
  6356. + }
  6357. + if (info->dma_out_enabled) {
  6358. + if (request_irq(info->dma_out_irq_nbr,
  6359. + tr_interrupt,
  6360. + info->dma_out_irq_flags,
  6361. + info->dma_out_irq_description,
  6362. + info)) {
  6363. + printk(KERN_WARNING "DMA irq '%s' busy; "
  6364. + "falling back to non-DMA mode\n",
  6365. + info->dma_out_irq_description);
  6366. + /* Make sure we never try to use DMA out */
  6367. + /* for the port again. */
  6368. + info->dma_out_enabled = 0;
  6369. + } else if (cris_request_dma(info->dma_out_nbr,
  6370. + info->dma_out_irq_description,
  6371. + DMA_VERBOSE_ON_ERROR,
  6372. + info->dma_owner)) {
  6373. + free_irq(info->dma_out_irq_nbr, info);
  6374. + printk(KERN_WARNING "DMA '%s' busy; "
  6375. + "falling back to non-DMA mode\n",
  6376. + info->dma_out_irq_description);
  6377. + /* Make sure we never try to use DMA out */
  6378. + /* for the port again. */
  6379. + info->dma_out_enabled = 0;
  6380. + }
  6381. +#ifdef SERIAL_DEBUG_OPEN
  6382. + else
  6383. + printk(KERN_DEBUG "DMA irq '%s' allocated\n",
  6384. + info->dma_out_irq_description);
  6385. +#endif
  6386. + }
  6387. + }
  6388. +
  6389. + /*
  6390. + * Start up the serial port
  6391. + */
  6392. +
  6393. + retval = startup(info);
  6394. + if (retval) {
  6395. + if (allocated_resources)
  6396. + deinit_port(info);
  6397. +
  6398. + /* FIXME Decrease count info->count here too? */
  6399. + return retval;
  6400. + }
  6401. +
  6402. +
  6403. + retval = block_til_ready(tty, filp, info);
  6404. + if (retval) {
  6405. +#ifdef SERIAL_DEBUG_OPEN
  6406. + printk("rs_open returning after block_til_ready with %d\n",
  6407. + retval);
  6408. +#endif
  6409. + if (allocated_resources)
  6410. + deinit_port(info);
  6411. +
  6412. + return retval;
  6413. + }
  6414. +
  6415. + if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
  6416. + *tty->termios = info->normal_termios;
  6417. + change_speed(info);
  6418. + }
  6419. +
  6420. +#ifdef SERIAL_DEBUG_OPEN
  6421. + printk("rs_open ttyS%d successful...\n", info->line);
  6422. +#endif
  6423. + DLOG_INT_TRIG( log_int_pos = 0);
  6424. +
  6425. + DFLIP( if (info->line == SERIAL_DEBUG_LINE) {
  6426. + info->icount.rx = 0;
  6427. + } );
  6428. +
  6429. + return 0;
  6430. +}
  6431. +
  6432. +#ifdef CONFIG_PROC_FS
  6433. +/*
  6434. + * /proc fs routines....
  6435. + */
  6436. +
  6437. +static void seq_line_info(struct seq_file *m, struct e100_serial *info)
  6438. +{
  6439. + unsigned long tmp;
  6440. +
  6441. + seq_printf(m, "%d: uart:E100 port:%lX irq:%d",
  6442. + info->line, (unsigned long)info->ioport, info->irq);
  6443. +
  6444. + if (!info->ioport || (info->type == PORT_UNKNOWN)) {
  6445. + seq_printf(m, "\n");
  6446. + return;
  6447. + }
  6448. +
  6449. + seq_printf(m, " baud:%d", info->baud);
  6450. + seq_printf(m, " tx:%lu rx:%lu",
  6451. + (unsigned long)info->icount.tx,
  6452. + (unsigned long)info->icount.rx);
  6453. + tmp = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
  6454. + if (tmp)
  6455. + seq_printf(m, " tx_pend:%lu/%lu",
  6456. + (unsigned long)tmp,
  6457. + (unsigned long)SERIAL_XMIT_SIZE);
  6458. +
  6459. + seq_printf(m, " rx_pend:%lu/%lu",
  6460. + (unsigned long)info->recv_cnt,
  6461. + (unsigned long)info->max_recv_cnt);
  6462. +
  6463. +#if 1
  6464. + if (info->port.tty) {
  6465. + if (info->port.tty->stopped)
  6466. + seq_printf(m, " stopped:%i",
  6467. + (int)info->port.tty->stopped);
  6468. + if (info->port.tty->hw_stopped)
  6469. + seq_printf(m, " hw_stopped:%i",
  6470. + (int)info->port.tty->hw_stopped);
  6471. + }
  6472. +
  6473. + {
  6474. + unsigned char rstat = info->ioport[REG_STATUS];
  6475. + if (rstat & IO_MASK(R_SERIAL0_STATUS, xoff_detect))
  6476. + seq_printf(m, " xoff_detect:1");
  6477. + }
  6478. +
  6479. +#endif
  6480. +
  6481. + if (info->icount.frame)
  6482. + seq_printf(m, " fe:%lu", (unsigned long)info->icount.frame);
  6483. +
  6484. + if (info->icount.parity)
  6485. + seq_printf(m, " pe:%lu", (unsigned long)info->icount.parity);
  6486. +
  6487. + if (info->icount.brk)
  6488. + seq_printf(m, " brk:%lu", (unsigned long)info->icount.brk);
  6489. +
  6490. + if (info->icount.overrun)
  6491. + seq_printf(m, " oe:%lu", (unsigned long)info->icount.overrun);
  6492. +
  6493. + /*
  6494. + * Last thing is the RS-232 status lines
  6495. + */
  6496. + if (!E100_RTS_GET(info))
  6497. + seq_puts(m, "|RTS");
  6498. + if (!E100_CTS_GET(info))
  6499. + seq_puts(m, "|CTS");
  6500. + if (!E100_DTR_GET(info))
  6501. + seq_puts(m, "|DTR");
  6502. + if (!E100_DSR_GET(info))
  6503. + seq_puts(m, "|DSR");
  6504. + if (!E100_CD_GET(info))
  6505. + seq_puts(m, "|CD");
  6506. + if (!E100_RI_GET(info))
  6507. + seq_puts(m, "|RI");
  6508. + seq_puts(m, "\n");
  6509. +}
  6510. +
  6511. +
  6512. +static int crisv10_proc_show(struct seq_file *m, void *v)
  6513. +{
  6514. + int i;
  6515. +
  6516. + seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
  6517. +
  6518. + for (i = 0; i < NR_PORTS; i++) {
  6519. + if (!rs_table[i].enabled)
  6520. + continue;
  6521. + seq_line_info(m, &rs_table[i]);
  6522. + }
  6523. +#ifdef DEBUG_LOG_INCLUDED
  6524. + for (i = 0; i < debug_log_pos; i++) {
  6525. + seq_printf(m, "%-4i %lu.%lu ",
  6526. + i, debug_log[i].time,
  6527. + timer_data_to_ns(debug_log[i].timer_data));
  6528. + seq_printf(m, debug_log[i].string, debug_log[i].value);
  6529. + }
  6530. + seq_printf(m, "debug_log %i/%i\n", i, DEBUG_LOG_SIZE);
  6531. + debug_log_pos = 0;
  6532. +#endif
  6533. + return 0;
  6534. +}
  6535. +
  6536. +static int crisv10_proc_open(struct inode *inode, struct file *file)
  6537. +{
  6538. + return single_open(file, crisv10_proc_show, NULL);
  6539. +}
  6540. +
  6541. +static const struct file_operations crisv10_proc_fops = {
  6542. + .owner = THIS_MODULE,
  6543. + .open = crisv10_proc_open,
  6544. + .read = seq_read,
  6545. + .llseek = seq_lseek,
  6546. + .release = single_release,
  6547. +};
  6548. +#endif
  6549. +
  6550. +
  6551. +/* Finally, routines used to initialize the serial driver. */
  6552. +
  6553. +static void show_serial_version(void)
  6554. +{
  6555. + printk(KERN_INFO
  6556. + "ETRAX 100LX serial-driver %s, "
  6557. + "(c) 2000-2004 Axis Communications AB\r\n",
  6558. + &serial_version[11]); /* "$Revision: x.yy" */
  6559. +}
  6560. +
  6561. +/* rs_init inits the driver at boot (using the module_init chain) */
  6562. +
  6563. +static const struct tty_operations rs_ops = {
  6564. + .open = rs_open,
  6565. + .close = rs_close,
  6566. + .write = rs_write,
  6567. + .flush_chars = rs_flush_chars,
  6568. + .write_room = rs_write_room,
  6569. + .chars_in_buffer = rs_chars_in_buffer,
  6570. + .flush_buffer = rs_flush_buffer,
  6571. + .ioctl = rs_ioctl,
  6572. + .throttle = rs_throttle,
  6573. + .unthrottle = rs_unthrottle,
  6574. + .set_termios = rs_set_termios,
  6575. + .stop = rs_stop,
  6576. + .start = rs_start,
  6577. + .hangup = rs_hangup,
  6578. + .break_ctl = rs_break,
  6579. + .send_xchar = rs_send_xchar,
  6580. + .wait_until_sent = rs_wait_until_sent,
  6581. + .tiocmget = rs_tiocmget,
  6582. + .tiocmset = rs_tiocmset,
  6583. +#ifdef CONFIG_PROC_FS
  6584. + .proc_fops = &crisv10_proc_fops,
  6585. +#endif
  6586. +};
  6587. +
  6588. +static int __init rs_init(void)
  6589. +{
  6590. + int i;
  6591. + struct e100_serial *info;
  6592. + struct tty_driver *driver = alloc_tty_driver(NR_PORTS);
  6593. +
  6594. + if (!driver)
  6595. + return -ENOMEM;
  6596. +
  6597. + show_serial_version();
  6598. +
  6599. + /* Setup the timed flush handler system */
  6600. +
  6601. +#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
  6602. + setup_timer(&flush_timer, timed_flush_handler, 0);
  6603. + mod_timer(&flush_timer, jiffies + 5);
  6604. +#endif
  6605. +
  6606. +#if defined(CONFIG_ETRAX_RS485)
  6607. +#if defined(CONFIG_ETRAX_RS485_ON_PA)
  6608. + if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
  6609. + rs485_pa_bit)) {
  6610. + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
  6611. + "RS485 pin\n");
  6612. + put_tty_driver(driver);
  6613. + return -EBUSY;
  6614. + }
  6615. +#endif
  6616. +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
  6617. + if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
  6618. + rs485_port_g_bit)) {
  6619. + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
  6620. + "RS485 pin\n");
  6621. + put_tty_driver(driver);
  6622. + return -EBUSY;
  6623. + }
  6624. +#endif
  6625. +#endif
  6626. +
  6627. + /* Initialize the tty_driver structure */
  6628. +
  6629. + driver->driver_name = "serial";
  6630. + driver->name = "ttyS";
  6631. + driver->major = TTY_MAJOR;
  6632. + driver->minor_start = 64;
  6633. + driver->type = TTY_DRIVER_TYPE_SERIAL;
  6634. + driver->subtype = SERIAL_TYPE_NORMAL;
  6635. + driver->init_termios = tty_std_termios;
  6636. + driver->init_termios.c_cflag =
  6637. + B115200 | CS8 | CREAD | HUPCL | CLOCAL; /* is normally B9600 default... */
  6638. + driver->init_termios.c_ispeed = 115200;
  6639. + driver->init_termios.c_ospeed = 115200;
  6640. + driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
  6641. +
  6642. + tty_set_operations(driver, &rs_ops);
  6643. + serial_driver = driver;
  6644. + if (tty_register_driver(driver))
  6645. + panic("Couldn't register serial driver\n");
  6646. + /* do some initializing for the separate ports */
  6647. +
  6648. + for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
  6649. + if (info->enabled) {
  6650. + if (cris_request_io_interface(info->io_if,
  6651. + info->io_if_description)) {
  6652. + printk(KERN_CRIT "ETRAX100LX async serial: "
  6653. + "Could not allocate IO pins for "
  6654. + "%s, port %d\n",
  6655. + info->io_if_description, i);
  6656. + info->enabled = 0;
  6657. + }
  6658. + }
  6659. + info->uses_dma_in = 0;
  6660. + info->uses_dma_out = 0;
  6661. + info->line = i;
  6662. + info->port.tty = NULL;
  6663. + info->type = PORT_ETRAX;
  6664. + info->tr_running = 0;
  6665. + info->forced_eop = 0;
  6666. + info->baud_base = DEF_BAUD_BASE;
  6667. + info->custom_divisor = 0;
  6668. + info->flags = 0;
  6669. + info->close_delay = 5*HZ/10;
  6670. + info->closing_wait = 30*HZ;
  6671. + info->x_char = 0;
  6672. + info->event = 0;
  6673. + info->count = 0;
  6674. + info->blocked_open = 0;
  6675. + info->normal_termios = driver->init_termios;
  6676. + init_waitqueue_head(&info->open_wait);
  6677. + init_waitqueue_head(&info->close_wait);
  6678. + info->xmit.buf = NULL;
  6679. + info->xmit.tail = info->xmit.head = 0;
  6680. + info->first_recv_buffer = info->last_recv_buffer = NULL;
  6681. + info->recv_cnt = info->max_recv_cnt = 0;
  6682. + info->last_tx_active_usec = 0;
  6683. + info->last_tx_active = 0;
  6684. +
  6685. +#if defined(CONFIG_ETRAX_RS485)
  6686. + /* Set sane defaults */
  6687. + info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
  6688. + info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
  6689. + info->rs485.delay_rts_before_send = 0;
  6690. + info->rs485.flags &= ~(SER_RS485_ENABLED);
  6691. +#endif
  6692. + INIT_WORK(&info->work, do_softint);
  6693. +
  6694. + if (info->enabled) {
  6695. + printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n",
  6696. + serial_driver->name, info->line, (unsigned int)info->ioport);
  6697. + }
  6698. + }
  6699. +#ifdef CONFIG_ETRAX_FAST_TIMER
  6700. +#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
  6701. + memset(fast_timers, 0, sizeof(fast_timers));
  6702. +#endif
  6703. +#ifdef CONFIG_ETRAX_RS485
  6704. + memset(fast_timers_rs485, 0, sizeof(fast_timers_rs485));
  6705. +#endif
  6706. + fast_timer_init();
  6707. +#endif
  6708. +
  6709. +#ifndef CONFIG_SVINTO_SIM
  6710. +#ifndef CONFIG_ETRAX_KGDB
  6711. + /* Not needed in simulator. May only complicate stuff. */
  6712. + /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
  6713. +
  6714. + if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
  6715. + IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
  6716. + panic("%s: Failed to request irq8", __func__);
  6717. +
  6718. +#endif
  6719. +#endif /* CONFIG_SVINTO_SIM */
  6720. +
  6721. + return 0;
  6722. +}
  6723. +
  6724. +/* this makes sure that rs_init is called during kernel boot */
  6725. +
  6726. +module_init(rs_init);
  6727. diff -Nur linux-2.6.32.orig/drivers/usb/host/hc-cris-dbg.h linux-2.6.32/drivers/usb/host/hc-cris-dbg.h
  6728. --- linux-2.6.32.orig/drivers/usb/host/hc-cris-dbg.h 1970-01-01 01:00:00.000000000 +0100
  6729. +++ linux-2.6.32/drivers/usb/host/hc-cris-dbg.h 2010-01-10 13:41:59.276309474 +0100
  6730. @@ -0,0 +1,146 @@
  6731. +
  6732. +/* macros for debug output */
  6733. +
  6734. +#define warn(fmt, args...) \
  6735. + printk(KERN_INFO "crisv10 warn: ");printk(fmt, ## args)
  6736. +
  6737. +#define hcd_dbg(hcd, fmt, args...) \
  6738. + dev_info(hcd->self.controller, fmt, ## args)
  6739. +#define hcd_err(hcd, fmt, args...) \
  6740. + dev_err(hcd->self.controller, fmt, ## args)
  6741. +#define hcd_info(hcd, fmt, args...) \
  6742. + dev_info(hcd->self.controller, fmt, ## args)
  6743. +#define hcd_warn(hcd, fmt, args...) \
  6744. + dev_warn(hcd->self.controller, fmt, ## args)
  6745. +
  6746. +/*
  6747. +#define devdrv_dbg(fmt, args...) \
  6748. + printk(KERN_INFO "usb_devdrv dbg: ");printk(fmt, ## args)
  6749. +*/
  6750. +#define devdrv_dbg(fmt, args...) {}
  6751. +
  6752. +#define devdrv_err(fmt, args...) \
  6753. + printk(KERN_ERR "usb_devdrv error: ");printk(fmt, ## args)
  6754. +#define devdrv_info(fmt, args...) \
  6755. + printk(KERN_INFO "usb_devdrv: ");printk(fmt, ## args)
  6756. +
  6757. +#define irq_dbg(fmt, args...) \
  6758. + printk(KERN_INFO "crisv10_irq dbg: ");printk(fmt, ## args)
  6759. +#define irq_err(fmt, args...) \
  6760. + printk(KERN_ERR "crisv10_irq error: ");printk(fmt, ## args)
  6761. +#define irq_warn(fmt, args...) \
  6762. + printk(KERN_INFO "crisv10_irq warn: ");printk(fmt, ## args)
  6763. +#define irq_info(fmt, args...) \
  6764. + printk(KERN_INFO "crisv10_hcd: ");printk(fmt, ## args)
  6765. +
  6766. +/*
  6767. +#define rh_dbg(fmt, args...) \
  6768. + printk(KERN_DEBUG "crisv10_rh dbg: ");printk(fmt, ## args)
  6769. +*/
  6770. +#define rh_dbg(fmt, args...) {}
  6771. +
  6772. +#define rh_err(fmt, args...) \
  6773. + printk(KERN_ERR "crisv10_rh error: ");printk(fmt, ## args)
  6774. +#define rh_warn(fmt, args...) \
  6775. + printk(KERN_INFO "crisv10_rh warning: ");printk(fmt, ## args)
  6776. +#define rh_info(fmt, args...) \
  6777. + printk(KERN_INFO "crisv10_rh: ");printk(fmt, ## args)
  6778. +
  6779. +/*
  6780. +#define tc_dbg(fmt, args...) \
  6781. + printk(KERN_INFO "crisv10_tc dbg: ");printk(fmt, ## args)
  6782. +*/
  6783. +#define tc_dbg(fmt, args...) {while(0){}}
  6784. +
  6785. +#define tc_err(fmt, args...) \
  6786. + printk(KERN_ERR "crisv10_tc error: ");printk(fmt, ## args)
  6787. +/*
  6788. +#define tc_warn(fmt, args...) \
  6789. + printk(KERN_INFO "crisv10_tc warning: ");printk(fmt, ## args)
  6790. +*/
  6791. +#define tc_warn(fmt, args...) {while(0){}}
  6792. +
  6793. +#define tc_info(fmt, args...) \
  6794. + printk(KERN_INFO "crisv10_tc: ");printk(fmt, ## args)
  6795. +
  6796. +
  6797. +/* Debug print-outs for various traffic types */
  6798. +
  6799. +#define intr_warn(fmt, args...) \
  6800. + printk(KERN_INFO "crisv10_intr warning: ");printk(fmt, ## args)
  6801. +
  6802. +#define intr_dbg(fmt, args...) \
  6803. + printk(KERN_DEBUG "crisv10_intr dbg: ");printk(fmt, ## args)
  6804. +/*
  6805. +#define intr_dbg(fmt, args...) {while(0){}}
  6806. +*/
  6807. +
  6808. +
  6809. +#define isoc_err(fmt, args...) \
  6810. + printk(KERN_ERR "crisv10_isoc error: ");printk(fmt, ## args)
  6811. +/*
  6812. +#define isoc_warn(fmt, args...) \
  6813. + printk(KERN_INFO "crisv10_isoc warning: ");printk(fmt, ## args)
  6814. +*/
  6815. +#define isoc_warn(fmt, args...) {while(0){}}
  6816. +
  6817. +/*
  6818. +#define isoc_dbg(fmt, args...) \
  6819. + printk(KERN_INFO "crisv10_isoc dbg: ");printk(fmt, ## args)
  6820. +*/
  6821. +#define isoc_dbg(fmt, args...) {while(0){}}
  6822. +
  6823. +/*
  6824. +#define timer_warn(fmt, args...) \
  6825. + printk(KERN_INFO "crisv10_timer warning: ");printk(fmt, ## args)
  6826. +*/
  6827. +#define timer_warn(fmt, args...) {while(0){}}
  6828. +
  6829. +/*
  6830. +#define timer_dbg(fmt, args...) \
  6831. + printk(KERN_INFO "crisv10_timer dbg: ");printk(fmt, ## args)
  6832. +*/
  6833. +#define timer_dbg(fmt, args...) {while(0){}}
  6834. +
  6835. +
  6836. +/* Debug printouts for events related to late finishing of URBs */
  6837. +
  6838. +#define late_dbg(fmt, args...) \
  6839. + printk(KERN_INFO "crisv10_late dbg: ");printk(fmt, ## args)
  6840. +/*
  6841. +#define late_dbg(fmt, args...) {while(0){}}
  6842. +*/
  6843. +
  6844. +#define late_warn(fmt, args...) \
  6845. + printk(KERN_INFO "crisv10_late warning: ");printk(fmt, ## args)
  6846. +/*
  6847. +#define errno_dbg(fmt, args...) \
  6848. + printk(KERN_INFO "crisv10_errno dbg: ");printk(fmt, ## args)
  6849. +*/
  6850. +#define errno_dbg(fmt, args...) {while(0){}}
  6851. +
  6852. +
  6853. +#define dma_dbg(fmt, args...) \
  6854. + printk(KERN_INFO "crisv10_dma dbg: ");printk(fmt, ## args)
  6855. +#define dma_err(fmt, args...) \
  6856. + printk(KERN_ERR "crisv10_dma error: ");printk(fmt, ## args)
  6857. +#define dma_warn(fmt, args...) \
  6858. + printk(KERN_INFO "crisv10_dma warning: ");printk(fmt, ## args)
  6859. +#define dma_info(fmt, args...) \
  6860. + printk(KERN_INFO "crisv10_dma: ");printk(fmt, ## args)
  6861. +
  6862. +
  6863. +
  6864. +#define str_dir(pipe) \
  6865. + (usb_pipeout(pipe) ? "out" : "in")
  6866. +#define str_type(pipe) \
  6867. + ({ \
  6868. + char *s = "?"; \
  6869. + switch (usb_pipetype(pipe)) { \
  6870. + case PIPE_ISOCHRONOUS: s = "iso"; break; \
  6871. + case PIPE_INTERRUPT: s = "intr"; break; \
  6872. + case PIPE_CONTROL: s = "ctrl"; break; \
  6873. + case PIPE_BULK: s = "bulk"; break; \
  6874. + }; \
  6875. + s; \
  6876. + })
  6877. diff -Nur linux-2.6.32.orig/drivers/usb/host/hc-crisv10.c linux-2.6.32/drivers/usb/host/hc-crisv10.c
  6878. --- linux-2.6.32.orig/drivers/usb/host/hc-crisv10.c 1970-01-01 01:00:00.000000000 +0100
  6879. +++ linux-2.6.32/drivers/usb/host/hc-crisv10.c 2010-01-10 13:41:59.326309689 +0100
  6880. @@ -0,0 +1,4801 @@
  6881. +/*
  6882. + *
  6883. + * ETRAX 100LX USB Host Controller Driver
  6884. + *
  6885. + * Copyright (C) 2005, 2006 Axis Communications AB
  6886. + *
  6887. + * Author: Konrad Eriksson <konrad.eriksson@axis.se>
  6888. + *
  6889. + */
  6890. +
  6891. +#include <linux/module.h>
  6892. +#include <linux/kernel.h>
  6893. +#include <linux/init.h>
  6894. +#include <linux/moduleparam.h>
  6895. +#include <linux/spinlock.h>
  6896. +#include <linux/usb.h>
  6897. +#include <linux/platform_device.h>
  6898. +
  6899. +#include <asm/io.h>
  6900. +#include <asm/irq.h>
  6901. +#include <arch/dma.h>
  6902. +#include <arch/io_interface_mux.h>
  6903. +
  6904. +#include "../core/hcd.h"
  6905. +#include "../core/hub.h"
  6906. +#include "hc-crisv10.h"
  6907. +#include "hc-cris-dbg.h"
  6908. +
  6909. +
  6910. +/***************************************************************************/
  6911. +/***************************************************************************/
  6912. +/* Host Controller settings */
  6913. +/***************************************************************************/
  6914. +/***************************************************************************/
  6915. +
  6916. +#define VERSION "1.00 hinko.4"
  6917. +#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
  6918. +#define DESCRIPTION "ETRAX 100LX USB Host Controller (2.6.25-rc9 port)"
  6919. +
  6920. +#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
  6921. +#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
  6922. +#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
  6923. +
  6924. +/* Number of physical ports in Etrax 100LX */
  6925. +#define USB_ROOT_HUB_PORTS 2
  6926. +
  6927. +const char hc_name[] = "hc-crisv10";
  6928. +const char product_desc[] = DESCRIPTION;
  6929. +
  6930. +/* The number of epids is, among other things, used for pre-allocating
  6931. + ctrl, bulk and isoc EP descriptors (one for each epid).
  6932. + Assumed to be > 1 when initiating the DMA lists. */
  6933. +#define NBR_OF_EPIDS 32
  6934. +
  6935. +/* Support interrupt traffic intervals up to 128 ms. */
  6936. +#define MAX_INTR_INTERVAL 128
  6937. +
  6938. +/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
  6939. + table must be "invalid". By this we mean that we shouldn't care about epid
  6940. + attentions for this epid, or at least handle them differently from epid
  6941. + attentions for "valid" epids. This define determines which one to use
  6942. + (don't change it). */
  6943. +#define INVALID_EPID 31
  6944. +/* A special epid for the bulk dummys. */
  6945. +#define DUMMY_EPID 30
  6946. +
  6947. +/* Module settings */
  6948. +
  6949. +MODULE_DESCRIPTION(DESCRIPTION);
  6950. +MODULE_LICENSE("GPL");
  6951. +MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
  6952. +
  6953. +
  6954. +/* Module parameters */
  6955. +
  6956. +/* 0 = No ports enabled
  6957. + 1 = Only port 1 enabled (on board ethernet on devboard)
  6958. + 2 = Only port 2 enabled (external connector on devboard)
  6959. + 3 = Both ports enabled
  6960. +*/
  6961. +static unsigned int ports = 3;
  6962. +module_param(ports, uint, S_IRUGO);
  6963. +MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
  6964. +
  6965. +
  6966. +/***************************************************************************/
  6967. +/***************************************************************************/
  6968. +/* Shared global variables for this module */
  6969. +/***************************************************************************/
  6970. +/***************************************************************************/
  6971. +
  6972. +/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
  6973. +static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  6974. +
  6975. +static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  6976. +
  6977. +/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
  6978. +static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
  6979. +static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
  6980. +
  6981. +static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  6982. +static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
  6983. +
  6984. +//static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
  6985. +
  6986. +/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
  6987. + causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
  6988. + gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
  6989. + EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
  6990. + in each frame. */
  6991. +static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
  6992. +
  6993. +/* List of URB pointers, where each points to the active URB for a epid.
  6994. + For Bulk, Ctrl and Intr this means which URB that currently is added to
  6995. + DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
  6996. + URB has completed, the queue is examined and the first URB in the queue is
  6997. + removed and moved to the activeUrbList while its state changes to STARTED and
  6998. + its transfer(s) get added to a DMA list (exception: Isoc, where URBs enter
  6999. + state STARTED directly and their transfers are added to DMA lists). */
  7000. +static struct urb *activeUrbList[NBR_OF_EPIDS];
  7001. +
  7002. +/* Additional software state info for each epid */
  7003. +static struct etrax_epid epid_state[NBR_OF_EPIDS];
  7004. +
  7005. +/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
  7006. + even if there is new data waiting to be processed */
  7007. +static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
  7008. +static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
  7009. +
  7010. +/* We want the start timer to expire before the eot timer, because the former
  7011. + might start traffic, thus making it unnecessary for the latter to time
  7012. + out. */
  7013. +#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
  7014. +#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
  7015. +
  7016. +/* Delay before a URB completion happen when it's scheduled to be delayed */
  7017. +#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
  7018. +
  7019. +/* Simplifying macros for checking software state info of a epid */
  7020. +/* ----------------------------------------------------------------------- */
  7021. +#define epid_inuse(epid) epid_state[epid].inuse
  7022. +#define epid_out_traffic(epid) epid_state[epid].out_traffic
  7023. +#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
  7024. +#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
  7025. +
  7026. +
  7027. +/***************************************************************************/
  7028. +/***************************************************************************/
  7029. +/* DEBUG FUNCTIONS */
  7030. +/***************************************************************************/
  7031. +/***************************************************************************/
  7032. +/* Note that these functions are always available in their "__" variants,
  7033. + for use in error situations. The variants without the "__" prefix are controlled by
  7034. + the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
  7035. +static void __dump_urb(struct urb* purb)
  7036. +{
  7037. + struct crisv10_urb_priv *urb_priv = purb->hcpriv;
  7038. + int urb_num = -1;
  7039. + if(urb_priv) {
  7040. + urb_num = urb_priv->urb_num;
  7041. + }
  7042. + printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
  7043. + printk("dev :0x%08lx\n", (unsigned long)purb->dev);
  7044. + printk("pipe :0x%08x\n", purb->pipe);
  7045. + printk("status :%d\n", purb->status);
  7046. + printk("transfer_flags :0x%08x\n", purb->transfer_flags);
  7047. + printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
  7048. + printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
  7049. + printk("actual_length :%d\n", purb->actual_length);
  7050. + printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
  7051. + printk("start_frame :%d\n", purb->start_frame);
  7052. + printk("number_of_packets :%d\n", purb->number_of_packets);
  7053. + printk("interval :%d\n", purb->interval);
  7054. + printk("error_count :%d\n", purb->error_count);
  7055. + printk("context :0x%08lx\n", (unsigned long)purb->context);
  7056. + printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
  7057. +}
  7058. +
  7059. +static void __dump_in_desc(volatile struct USB_IN_Desc *in)
  7060. +{
  7061. + printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
  7062. + printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
  7063. + printk(" command : 0x%04x\n", in->command);
  7064. + printk(" next : 0x%08lx\n", in->next);
  7065. + printk(" buf : 0x%08lx\n", in->buf);
  7066. + printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
  7067. + printk(" status : 0x%04x\n\n", in->status);
  7068. +}
  7069. +
  7070. +static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
  7071. +{
  7072. + char tt = (sb->command & 0x30) >> 4;
  7073. + char *tt_string;
  7074. +
  7075. + switch (tt) {
  7076. + case 0:
  7077. + tt_string = "zout";
  7078. + break;
  7079. + case 1:
  7080. + tt_string = "in";
  7081. + break;
  7082. + case 2:
  7083. + tt_string = "out";
  7084. + break;
  7085. + case 3:
  7086. + tt_string = "setup";
  7087. + break;
  7088. + default:
  7089. + tt_string = "unknown (weird)";
  7090. + }
  7091. +
  7092. + printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
  7093. + printk(" command:0x%04x (", sb->command);
  7094. + printk("rem:%d ", (sb->command & 0x3f00) >> 8);
  7095. + printk("full:%d ", (sb->command & 0x40) >> 6);
  7096. + printk("tt:%d(%s) ", tt, tt_string);
  7097. + printk("intr:%d ", (sb->command & 0x8) >> 3);
  7098. + printk("eot:%d ", (sb->command & 0x2) >> 1);
  7099. + printk("eol:%d)", sb->command & 0x1);
  7100. + printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
  7101. + printk(" next:0x%08lx", sb->next);
  7102. + printk(" buf:0x%08lx\n", sb->buf);
  7103. +}
  7104. +
  7105. +
  7106. +static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
  7107. +{
  7108. + printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
  7109. + printk(" command:0x%04x (", ep->command);
  7110. + printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
  7111. + printk("enable:%d ", (ep->command & 0x10) >> 4);
  7112. + printk("intr:%d ", (ep->command & 0x8) >> 3);
  7113. + printk("eof:%d ", (ep->command & 0x2) >> 1);
  7114. + printk("eol:%d)", ep->command & 0x1);
  7115. + printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
  7116. + printk(" next:0x%08lx", ep->next);
  7117. + printk(" sub:0x%08lx\n", ep->sub);
  7118. +}
  7119. +
  7120. +static inline void __dump_ep_list(int pipe_type)
  7121. +{
  7122. + volatile struct USB_EP_Desc *ep;
  7123. + volatile struct USB_EP_Desc *first_ep;
  7124. + volatile struct USB_SB_Desc *sb;
  7125. +
  7126. + switch (pipe_type)
  7127. + {
  7128. + case PIPE_BULK:
  7129. + first_ep = &TxBulkEPList[0];
  7130. + break;
  7131. + case PIPE_CONTROL:
  7132. + first_ep = &TxCtrlEPList[0];
  7133. + break;
  7134. + case PIPE_INTERRUPT:
  7135. + first_ep = &TxIntrEPList[0];
  7136. + break;
  7137. + case PIPE_ISOCHRONOUS:
  7138. + first_ep = &TxIsocEPList[0];
  7139. + break;
  7140. + default:
  7141. + warn("Cannot dump unknown traffic type");
  7142. + return;
  7143. + }
  7144. + ep = first_ep;
  7145. +
  7146. + printk("\n\nDumping EP list...\n\n");
  7147. +
  7148. + do {
  7149. + __dump_ep_desc(ep);
  7150. + /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
  7151. + sb = ep->sub ? phys_to_virt(ep->sub) : 0;
  7152. + while (sb) {
  7153. + __dump_sb_desc(sb);
  7154. + sb = sb->next ? phys_to_virt(sb->next) : 0;
  7155. + }
  7156. + ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
  7157. +
  7158. + } while (ep != first_ep);
  7159. +}
  7160. +
  7161. +static inline void __dump_ept_data(int epid)
  7162. +{
  7163. + unsigned long flags;
  7164. + __u32 r_usb_ept_data;
  7165. +
  7166. + if (epid < 0 || epid > 31) {
  7167. + printk("Cannot dump ept data for invalid epid %d\n", epid);
  7168. + return;
  7169. + }
  7170. +
  7171. + local_irq_save(flags);
  7172. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  7173. + nop();
  7174. + r_usb_ept_data = *R_USB_EPT_DATA;
  7175. + local_irq_restore(flags);
  7176. +
  7177. + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
  7178. + if (r_usb_ept_data == 0) {
  7179. + /* No need for more detailed printing. */
  7180. + return;
  7181. + }
  7182. + printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
  7183. + printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
  7184. + printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
  7185. + printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
  7186. + printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
  7187. + printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
  7188. + printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
  7189. + printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
  7190. + printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
  7191. + printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
  7192. + printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
  7193. + printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
  7194. +}
  7195. +
  7196. +static inline void __dump_ept_data_iso(int epid)
  7197. +{
  7198. + unsigned long flags;
  7199. + __u32 ept_data;
  7200. +
  7201. + if (epid < 0 || epid > 31) {
  7202. + printk("Cannot dump ept data for invalid epid %d\n", epid);
  7203. + return;
  7204. + }
  7205. +
  7206. + local_irq_save(flags);
  7207. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  7208. + nop();
  7209. + ept_data = *R_USB_EPT_DATA_ISO;
  7210. + local_irq_restore(flags);
  7211. +
  7212. + printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
  7213. + if (ept_data == 0) {
  7214. + /* No need for more detailed printing. */
  7215. + return;
  7216. + }
  7217. + printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
  7218. + ept_data));
  7219. + printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
  7220. + ept_data));
  7221. + printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
  7222. + ept_data));
  7223. + printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
  7224. + ept_data));
  7225. + printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
  7226. + ept_data));
  7227. + printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
  7228. + ept_data));
  7229. +}
  7230. +
  7231. +static inline void __dump_ept_data_list(void)
  7232. +{
  7233. + int i;
  7234. +
  7235. + printk("Dumping the whole R_USB_EPT_DATA list\n");
  7236. +
  7237. + for (i = 0; i < 32; i++) {
  7238. + __dump_ept_data(i);
  7239. + }
  7240. +}
  7241. +
  7242. +static void debug_epid(int epid) {
  7243. + int i;
  7244. +
  7245. + if(epid_isoc(epid)) {
  7246. + __dump_ept_data_iso(epid);
  7247. + } else {
  7248. + __dump_ept_data(epid);
  7249. + }
  7250. +
  7251. + printk("Bulk:\n");
  7252. + for(i = 0; i < 32; i++) {
  7253. + if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
  7254. + epid) {
  7255. + printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
  7256. + }
  7257. + }
  7258. +
  7259. + printk("Ctrl:\n");
  7260. + for(i = 0; i < 32; i++) {
  7261. + if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
  7262. + epid) {
  7263. + printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
  7264. + }
  7265. + }
  7266. +
  7267. + printk("Intr:\n");
  7268. + for(i = 0; i < MAX_INTR_INTERVAL; i++) {
  7269. + if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
  7270. + epid) {
  7271. + printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
  7272. + }
  7273. + }
  7274. +
  7275. + printk("Isoc:\n");
  7276. + for(i = 0; i < 32; i++) {
  7277. + if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
  7278. + epid) {
  7279. + printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
  7280. + }
  7281. + }
  7282. +
  7283. + __dump_ept_data_list();
  7284. + __dump_ep_list(PIPE_INTERRUPT);
  7285. + printk("\n\n");
  7286. +}
  7287. +
  7288. +
  7289. +
  7290. +char* hcd_status_to_str(__u8 bUsbStatus) {
  7291. + static char hcd_status_str[128];
  7292. + hcd_status_str[0] = '\0';
  7293. + if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
  7294. + strcat(hcd_status_str, "ourun ");
  7295. + }
  7296. + if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
  7297. + strcat(hcd_status_str, "perror ");
  7298. + }
  7299. + if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
  7300. + strcat(hcd_status_str, "device_mode ");
  7301. + }
  7302. + if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
  7303. + strcat(hcd_status_str, "host_mode ");
  7304. + }
  7305. + if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
  7306. + strcat(hcd_status_str, "started ");
  7307. + }
  7308. + if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
  7309. + strcat(hcd_status_str, "running ");
  7310. + }
  7311. + return hcd_status_str;
  7312. +}
  7313. +
  7314. +
  7315. +char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
  7316. + static char sblist_to_str_buff[128];
  7317. + char tmp[32], tmp2[32];
  7318. + sblist_to_str_buff[0] = '\0';
  7319. + while(sb_desc != NULL) {
  7320. + switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
  7321. + case 0: sprintf(tmp, "zout"); break;
  7322. + case 1: sprintf(tmp, "in"); break;
  7323. + case 2: sprintf(tmp, "out"); break;
  7324. + case 3: sprintf(tmp, "setup"); break;
  7325. + }
  7326. + sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
  7327. + strcat(sblist_to_str_buff, tmp2);
  7328. + if(sb_desc->next != 0) {
  7329. + sb_desc = phys_to_virt(sb_desc->next);
  7330. + } else {
  7331. + sb_desc = NULL;
  7332. + }
  7333. + }
  7334. + return sblist_to_str_buff;
  7335. +}
  7336. +
  7337. +char* port_status_to_str(__u16 wPortStatus) {
  7338. + static char port_status_str[128];
  7339. + port_status_str[0] = '\0';
  7340. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
  7341. + strcat(port_status_str, "connected ");
  7342. + }
  7343. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
  7344. + strcat(port_status_str, "enabled ");
  7345. + }
  7346. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
  7347. + strcat(port_status_str, "suspended ");
  7348. + }
  7349. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
  7350. + strcat(port_status_str, "reset ");
  7351. + }
  7352. + if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
  7353. + strcat(port_status_str, "full-speed ");
  7354. + } else {
  7355. + strcat(port_status_str, "low-speed ");
  7356. + }
  7357. + return port_status_str;
  7358. +}
  7359. +
  7360. +
  7361. +char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
  7362. + static char endpoint_to_str_buff[128];
  7363. + char tmp[32];
  7364. + int epnum = ed->bEndpointAddress & 0x0F;
  7365. + int dir = ed->bEndpointAddress & 0x80;
  7366. + int type = ed->bmAttributes & 0x03;
  7367. + endpoint_to_str_buff[0] = '\0';
  7368. + sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
  7369. + switch(type) {
  7370. + case 0:
  7371. + sprintf(tmp, " ctrl");
  7372. + break;
  7373. + case 1:
  7374. + sprintf(tmp, " isoc");
  7375. + break;
  7376. + case 2:
  7377. + sprintf(tmp, " bulk");
  7378. + break;
  7379. + case 3:
  7380. + sprintf(tmp, " intr");
  7381. + break;
  7382. + }
  7383. + strcat(endpoint_to_str_buff, tmp);
  7384. + if(dir) {
  7385. + sprintf(tmp, " in");
  7386. + } else {
  7387. + sprintf(tmp, " out");
  7388. + }
  7389. + strcat(endpoint_to_str_buff, tmp);
  7390. +
  7391. + return endpoint_to_str_buff;
  7392. +}
  7393. +
  7394. +/* Debug helper functions for Transfer Controller */
  7395. +char* pipe_to_str(unsigned int pipe) {
  7396. + static char pipe_to_str_buff[128];
  7397. + char tmp[64];
  7398. + sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
  7399. + sprintf(tmp, " type:%s", str_type(pipe));
  7400. + strcat(pipe_to_str_buff, tmp);
  7401. +
  7402. + sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
  7403. + strcat(pipe_to_str_buff, tmp);
  7404. + sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
  7405. + strcat(pipe_to_str_buff, tmp);
  7406. + return pipe_to_str_buff;
  7407. +}
  7408. +
  7409. +
  7410. +#define USB_DEBUG_DESC 1
  7411. +
  7412. +#ifdef USB_DEBUG_DESC
  7413. +#define dump_in_desc(x) __dump_in_desc(x)
  7414. +#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
  7415. +#define dump_ep_desc(x) __dump_ep_desc(x)
  7416. +#define dump_ept_data(x) __dump_ept_data(x)
  7417. +#else
  7418. +#define dump_in_desc(...) do {} while (0)
  7419. +#define dump_sb_desc(...) do {} while (0)
  7420. +#define dump_ep_desc(...) do {} while (0)
  7421. +#endif
  7422. +
  7423. +
  7424. +/* Uncomment this to enable massive function call trace
  7425. + #define USB_DEBUG_TRACE */
  7426. +//#define USB_DEBUG_TRACE 1
  7427. +
  7428. +#ifdef USB_DEBUG_TRACE
  7429. +#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
  7430. +#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
  7431. +#else
  7432. +#define DBFENTER do {} while (0)
  7433. +#define DBFEXIT do {} while (0)
  7434. +#endif
  7435. +
  7436. +#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
  7437. +{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
  7438. +
  7439. +/* Most helpful debugging aid */
  7440. +#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
  7441. +
  7442. +
  7443. +/***************************************************************************/
  7444. +/***************************************************************************/
  7445. +/* Forward declarations */
  7446. +/***************************************************************************/
  7447. +/***************************************************************************/
  7448. +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
  7449. +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
  7450. +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
  7451. +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
  7452. +
  7453. +void rh_port_status_change(__u16[]);
  7454. +int rh_clear_port_feature(__u8, __u16);
  7455. +int rh_set_port_feature(__u8, __u16);
  7456. +static void rh_disable_port(unsigned int port);
  7457. +
  7458. +static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
  7459. + int timer);
  7460. +
  7461. +//static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
  7462. +// int mem_flags);
  7463. +static int tc_setup_epid(struct urb *urb, int mem_flags);
  7464. +static void tc_free_epid(struct usb_host_endpoint *ep);
  7465. +static int tc_allocate_epid(void);
  7466. +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
  7467. +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
  7468. + int status);
  7469. +
  7470. +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
  7471. + int mem_flags);
  7472. +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
  7473. +
  7474. +static inline struct urb *urb_list_first(int epid);
  7475. +static inline void urb_list_add(struct urb *urb, int epid,
  7476. + int mem_flags);
  7477. +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
  7478. +static inline void urb_list_del(struct urb *urb, int epid);
  7479. +static inline void urb_list_move_last(struct urb *urb, int epid);
  7480. +static inline struct urb *urb_list_next(struct urb *urb, int epid);
  7481. +
  7482. +int create_sb_for_urb(struct urb *urb, int mem_flags);
  7483. +int init_intr_urb(struct urb *urb, int mem_flags);
  7484. +
  7485. +static inline void etrax_epid_set(__u8 index, __u32 data);
  7486. +static inline void etrax_epid_clear_error(__u8 index);
  7487. +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
  7488. + __u8 toggle);
  7489. +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
  7490. +static inline __u32 etrax_epid_get(__u8 index);
  7491. +
  7492. +/* We're accessing the same register position in Etrax so
  7493. + when we do full access the internal difference doesn't matter */
  7494. +#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
  7495. +#define etrax_epid_iso_get(index) etrax_epid_get(index)
  7496. +
  7497. +
  7498. +//static void tc_dma_process_isoc_urb(struct urb *urb);
  7499. +static void tc_dma_process_queue(int epid);
  7500. +static void tc_dma_unlink_intr_urb(struct urb *urb);
  7501. +static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
  7502. +static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
  7503. +
  7504. +static void tc_bulk_start_timer_func(unsigned long dummy);
  7505. +static void tc_bulk_eot_timer_func(unsigned long dummy);
  7506. +
  7507. +
  7508. +/*************************************************************/
  7509. +/*************************************************************/
  7510. +/* Host Controller Driver block */
  7511. +/*************************************************************/
  7512. +/*************************************************************/
  7513. +
  7514. +/* HCD operations */
  7515. +static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
  7516. +static int crisv10_hcd_reset(struct usb_hcd *);
  7517. +static int crisv10_hcd_start(struct usb_hcd *);
  7518. +static void crisv10_hcd_stop(struct usb_hcd *);
  7519. +#ifdef CONFIG_PM
  7520. +static int crisv10_hcd_suspend(struct device *, u32, u32);
  7521. +static int crisv10_hcd_resume(struct device *, u32);
  7522. +#endif /* CONFIG_PM */
  7523. +static int crisv10_hcd_get_frame(struct usb_hcd *);
  7524. +
  7525. +//static int tc_urb_enqueue(struct usb_hcd *, struct usb_host_endpoint *ep, struct urb *, gfp_t mem_flags);
  7526. +static int tc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
  7527. +//static int tc_urb_dequeue(struct usb_hcd *, struct urb *);
  7528. +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
  7529. +static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
  7530. +
  7531. +static int rh_status_data_request(struct usb_hcd *, char *);
  7532. +static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
  7533. +
  7534. +#ifdef CONFIG_PM
  7535. +static int crisv10_hcd_hub_suspend(struct usb_hcd *);
  7536. +static int crisv10_hcd_hub_resume(struct usb_hcd *);
  7537. +#endif /* CONFIG_PM */
  7538. +#ifdef CONFIG_USB_OTG
  7539. +static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
  7540. +#endif /* CONFIG_USB_OTG */
  7541. +
  7542. +/* host controller driver interface */
  7543. +static const struct hc_driver crisv10_hc_driver =
  7544. + {
  7545. + .description = hc_name,
  7546. + .product_desc = product_desc,
  7547. + .hcd_priv_size = sizeof(struct crisv10_hcd),
  7548. +
  7549. + /* Attaching IRQ handler manually in probe() */
  7550. + /* .irq = crisv10_hcd_irq, */
  7551. +
  7552. + .flags = HCD_USB11,
  7553. +
  7554. + /* called to init HCD and root hub */
  7555. + .reset = crisv10_hcd_reset,
  7556. + .start = crisv10_hcd_start,
  7557. +
  7558. + /* cleanly make HCD stop writing memory and doing I/O */
  7559. + .stop = crisv10_hcd_stop,
  7560. +
  7561. + /* return current frame number */
  7562. + .get_frame_number = crisv10_hcd_get_frame,
  7563. +
  7564. +
  7565. + /* Manage i/o requests via the Transfer Controller */
  7566. + .urb_enqueue = tc_urb_enqueue,
  7567. + .urb_dequeue = tc_urb_dequeue,
  7568. +
  7569. + /* hw synch, freeing endpoint resources that urb_dequeue can't */
  7570. + .endpoint_disable = tc_endpoint_disable,
  7571. +
  7572. +
  7573. + /* Root Hub support */
  7574. + .hub_status_data = rh_status_data_request,
  7575. + .hub_control = rh_control_request,
  7576. +#ifdef CONFIG_PM
  7577. + .hub_suspend = rh_suspend_request,
  7578. + .hub_resume = rh_resume_request,
  7579. +#endif /* CONFIG_PM */
  7580. +#ifdef CONFIG_USB_OTG
  7581. + .start_port_reset = crisv10_hcd_start_port_reset,
  7582. +#endif /* CONFIG_USB_OTG */
  7583. + };
  7584. +
  7585. +
  7586. +/*
  7587. + * conversion between pointers to a hcd and the corresponding
  7588. + * crisv10_hcd
  7589. + */
  7590. +
  7591. +static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
  7592. +{
  7593. + return (struct crisv10_hcd *) hcd->hcd_priv;
  7594. +}
  7595. +
  7596. +static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
  7597. +{
  7598. + return container_of((void *) hcd, struct usb_hcd, hcd_priv);
  7599. +}
  7600. +
  7601. +/* check if specified port is in use */
  7602. +static inline int port_in_use(unsigned int port)
  7603. +{
  7604. + return ports & (1 << port);
  7605. +}
  7606. +
  7607. +/* number of ports in use */
  7608. +static inline unsigned int num_ports(void)
  7609. +{
  7610. + unsigned int i, num = 0;
  7611. + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
  7612. + if (port_in_use(i))
  7613. + num++;
  7614. + return num;
  7615. +}
  7616. +
  7617. +/* map hub port number to the port number used internally by the HC */
  7618. +static inline unsigned int map_port(unsigned int port)
  7619. +{
  7620. + unsigned int i, num = 0;
  7621. + for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
  7622. + if (port_in_use(i))
  7623. + if (++num == port)
  7624. + return i;
  7625. + return -1;
  7626. +}
  7627. +
  7628. +/* size of descriptors in slab cache */
  7629. +#ifndef MAX
  7630. +#define MAX(x, y) ((x) > (y) ? (x) : (y))
  7631. +#endif
  7632. +
  7633. +
  7634. +/******************************************************************/
  7635. +/* Hardware Interrupt functions */
  7636. +/******************************************************************/
  7637. +
  7638. +/* Fast interrupt handler for HC */
  7639. +static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
  7640. +{
  7641. + struct usb_hcd *hcd = vcd;
  7642. + struct crisv10_irq_reg reg;
  7643. + __u32 irq_mask;
  7644. + unsigned long flags;
  7645. +
  7646. + DBFENTER;
  7647. +
  7648. + ASSERT(hcd != NULL);
  7649. + reg.hcd = hcd;
  7650. +
  7651. + /* Turn off other interrupts while handling these sensitive cases */
  7652. + local_irq_save(flags);
  7653. +
  7654. + /* Read out which interrupts are flagged */
  7655. + irq_mask = *R_USB_IRQ_MASK_READ;
  7656. + reg.r_usb_irq_mask_read = irq_mask;
  7657. +
  7658. + /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
  7659. + R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
  7660. + clears the ourun and perror fields of R_USB_STATUS. */
  7661. + reg.r_usb_status = *R_USB_STATUS;
  7662. +
  7663. + /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
  7664. + interrupts. */
  7665. + reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
  7666. +
  7667. + /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
  7668. + port_status interrupt. */
  7669. + reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
  7670. + reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
  7671. +
  7672. + /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
  7673. + /* Note: the lower 11 bits contain the actual frame number, sent with each
  7674. + sof. */
  7675. + reg.r_usb_fm_number = *R_USB_FM_NUMBER;
  7676. +
  7677. + /* Interrupts are handled in order of priority. */
  7678. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
  7679. + crisv10_hcd_port_status_irq(&reg);
  7680. + }
  7681. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
  7682. + crisv10_hcd_epid_attn_irq(&reg);
  7683. + }
  7684. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
  7685. + crisv10_hcd_ctl_status_irq(&reg);
  7686. + }
  7687. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
  7688. + crisv10_hcd_isoc_eof_irq(&reg);
  7689. + }
  7690. + if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
  7691. + /* Update/restart the bulk start timer since obviously the channel is
  7692. + running. */
  7693. + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
  7694. + /* Update/restart the bulk eot timer since we just received a bulk eot
  7695. + interrupt. */
  7696. + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
  7697. +
  7698. + /* Check for finished bulk transfers on epids */
  7699. + check_finished_bulk_tx_epids(hcd, 0);
  7700. + }
  7701. + local_irq_restore(flags);
  7702. +
  7703. + DBFEXIT;
  7704. + return IRQ_HANDLED;
  7705. +}
  7706. +
  7707. +
  7708. +void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
  7709. + struct usb_hcd *hcd = reg->hcd;
  7710. + struct crisv10_urb_priv *urb_priv;
  7711. + int epid;
  7712. + DBFENTER;
  7713. +
  7714. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  7715. + if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
  7716. + struct urb *urb;
  7717. + __u32 ept_data;
  7718. + int error_code;
  7719. +
  7720. + if (epid == DUMMY_EPID || epid == INVALID_EPID) {
  7721. + /* We definitely don't care about these ones. Besides, they are
  7722. + always disabled, so any possible disabling caused by the
  7723. + epid attention interrupt is irrelevant. */
  7724. + warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
  7725. + continue;
  7726. + }
  7727. +
  7728. + if(!epid_inuse(epid)) {
  7729. + irq_err("Epid attention on epid:%d that isn't in use\n", epid);
  7730. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  7731. + debug_epid(epid);
  7732. + continue;
  7733. + }
  7734. +
  7735. + /* Note that although there are separate R_USB_EPT_DATA and
  7736. + R_USB_EPT_DATA_ISO registers, they are located at the same address and
  7737. + are of the same size. In other words, this read should be ok for isoc
  7738. + also. */
  7739. + ept_data = etrax_epid_get(epid);
  7740. + error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
  7741. +
  7742. + /* Get the active URB for this epid. We blatantly assume
  7743. + that only this URB could have caused the epid attention. */
  7744. + urb = activeUrbList[epid];
  7745. + if (urb == NULL) {
  7746. + irq_err("Attention on epid:%d error:%d with no active URB.\n",
  7747. + epid, error_code);
  7748. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  7749. + debug_epid(epid);
  7750. + continue;
  7751. + }
  7752. +
  7753. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  7754. + ASSERT(urb_priv);
  7755. +
  7756. + /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
  7757. + if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
  7758. +
  7759. + /* Isoc traffic doesn't have error_count_in/error_count_out. */
  7760. + if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
  7761. + (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
  7762. + IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
  7763. + /* Check if URB already is marked for late-finish; we can get
  7764. + several 3rd errors for Intr traffic when a device is unplugged */
  7765. + if(urb_priv->later_data == NULL) {
  7766. + /* 3rd error. */
  7767. + irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
  7768. + str_dir(urb->pipe), str_type(urb->pipe),
  7769. + (unsigned int)urb, urb_priv->urb_num);
  7770. +
  7771. + tc_finish_urb_later(hcd, urb, -EPROTO);
  7772. + }
  7773. +
  7774. + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
  7775. + irq_warn("Perror for epid:%d\n", epid);
  7776. + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
  7777. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  7778. + __dump_urb(urb);
  7779. + debug_epid(epid);
  7780. +
  7781. + if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
  7782. + /* invalid ep_id */
  7783. + panic("Perror because of invalid epid."
  7784. + " Deconfigured too early?");
  7785. + } else {
  7786. + /* past eof1, near eof, zout transfer, setup transfer */
  7787. + /* Dump the urb and the relevant EP descriptor. */
  7788. + panic("Something wrong with DMA descriptor contents."
  7789. + " Too much traffic inserted?");
  7790. + }
  7791. + } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
  7792. + /* buffer ourun */
  7793. + printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
  7794. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  7795. + __dump_urb(urb);
  7796. + debug_epid(epid);
  7797. +
  7798. + panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
  7799. + } else {
  7800. + irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
  7801. + str_dir(urb->pipe), str_type(urb->pipe));
  7802. + printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
  7803. + __dump_urb(urb);
  7804. + debug_epid(epid);
  7805. + }
  7806. +
  7807. + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
  7808. + stall)) {
  7809. + /* Not really a protocol error, just says that the endpoint gave
  7810. + a stall response. Note that error_code cannot be stall for isoc. */
  7811. + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  7812. + panic("Isoc traffic cannot stall");
  7813. + }
  7814. +
  7815. + tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
  7816. + str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
  7817. + tc_finish_urb(hcd, urb, -EPIPE);
  7818. +
  7819. + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
  7820. + bus_error)) {
  7821. + /* Two devices responded to a transaction request. Must be resolved
  7822. + by software. FIXME: Reset ports? */
  7823. + panic("Bus error for epid %d."
  7824. + " Two devices responded to transaction request\n",
  7825. + epid);
  7826. +
  7827. + } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
  7828. + buffer_error)) {
  7829. + /* DMA overrun or underrun. */
  7830. + irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
  7831. + str_dir(urb->pipe), str_type(urb->pipe));
  7832. +
  7833. + /* It seems that error_code = buffer_error in
  7834. + R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
  7835. + are the same error. */
  7836. + tc_finish_urb(hcd, urb, -EPROTO);
  7837. + } else {
  7838. + irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
  7839. + str_dir(urb->pipe), str_type(urb->pipe));
  7840. + dump_ept_data(epid);
  7841. + }
  7842. + }
  7843. + }
  7844. + DBFEXIT;
  7845. +}
  7846. +
  7847. +void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
  7848. +{
  7849. + __u16 port_reg[USB_ROOT_HUB_PORTS];
  7850. + DBFENTER;
  7851. + port_reg[0] = reg->r_usb_rh_port_status_1;
  7852. + port_reg[1] = reg->r_usb_rh_port_status_2;
  7853. + rh_port_status_change(port_reg);
  7854. + DBFEXIT;
  7855. +}
  7856. +
  7857. +void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
  7858. +{
  7859. + int epid;
  7860. + struct urb *urb;
  7861. + struct crisv10_urb_priv *urb_priv;
  7862. +
  7863. + DBFENTER;
  7864. +
  7865. + for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
  7866. +
  7867. + /* Only check epids that are in use, are valid and have an SB list */
  7868. + if (!epid_inuse(epid) || epid == INVALID_EPID ||
  7869. + TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
  7870. + /* Nothing here to see. */
  7871. + continue;
  7872. + }
  7873. + ASSERT(epid_isoc(epid));
  7874. +
  7875. + /* Get the active URB for this epid (if any). */
  7876. + urb = activeUrbList[epid];
  7877. + if (urb == 0) {
  7878. + isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
  7879. + continue;
  7880. + }
  7881. + if(!epid_out_traffic(epid)) {
  7882. + /* Sanity check. */
  7883. + ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
  7884. +
  7885. + urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  7886. + ASSERT(urb_priv);
  7887. +
  7888. + if (urb_priv->urb_state == NOT_STARTED) {
  7889. + /* If ASAP is not set and urb->start_frame is the current frame,
  7890. + start the transfer. */
  7891. + if (!(urb->transfer_flags & URB_ISO_ASAP) &&
  7892. + (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
  7893. + /* EP should not be enabled if we're waiting for start_frame */
  7894. + ASSERT((TxIsocEPList[epid].command &
  7895. + IO_STATE(USB_EP_command, enable, yes)) == 0);
  7896. +
  7897. + isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
  7898. + TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  7899. +
  7900. + /* This urb is now active. */
  7901. + urb_priv->urb_state = STARTED;
  7902. + continue;
  7903. + }
  7904. + }
  7905. + }
  7906. + }
  7907. +
  7908. + DBFEXIT;
  7909. +}
  7910. +
  7911. +void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
  7912. +{
  7913. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
  7914. +
  7915. + DBFENTER;
  7916. + ASSERT(crisv10_hcd);
  7917. +
  7918. + irq_dbg("ctr_status_irq, controller status: %s\n",
  7919. + hcd_status_to_str(reg->r_usb_status));
  7920. +
  7921. + /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
  7922. + list for the corresponding epid? */
  7923. + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
  7924. + panic("USB controller got ourun.");
  7925. + }
  7926. + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
  7927. +
  7928. + /* Before, etrax_usb_do_intr_recover was called on this epid if it was
  7929. + an interrupt pipe. I don't see how re-enabling all EP descriptors
  7930. + will help if there was a programming error. */
  7931. + panic("USB controller got perror.");
  7932. + }
  7933. +
  7934. + /* Keep track of USB Controller, if it's running or not */
  7935. + if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
  7936. + crisv10_hcd->running = 1;
  7937. + } else {
  7938. + crisv10_hcd->running = 0;
  7939. + }
  7940. +
  7941. + if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
  7942. + /* We should never operate in device mode. */
  7943. + panic("USB controller in device mode.");
  7944. + }
  7945. +
  7946. + /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
  7947. + using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
  7948. + set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
  7949. +
  7950. + DBFEXIT;
  7951. +}
  7952. +
  7953. +
  7954. +/******************************************************************/
  7955. +/* Host Controller interface functions */
  7956. +/******************************************************************/
  7957. +
  7958. +static inline void crisv10_ready_wait(void) {
  7959. + volatile int timeout = 10000;
  7960. + /* Check the busy bit of USB controller in Etrax */
  7961. + while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
  7962. + (timeout-- > 0));
  7963. + if(timeout < 0) {
  7964. + warn("Timeout while waiting for USB controller to be idle\n");
  7965. + }
  7966. +}
  7967. +
  7968. +/* reset host controller */
  7969. +static int crisv10_hcd_reset(struct usb_hcd *hcd)
  7970. +{
  7971. + DBFENTER;
  7972. + hcd_dbg(hcd, "reset\n");
  7973. +
  7974. +
  7975. + /* Reset the USB interface. */
  7976. + /*
  7977. + *R_USB_COMMAND =
  7978. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  7979. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  7980. + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  7981. + nop();
  7982. + */
  7983. + DBFEXIT;
  7984. + return 0;
  7985. +}
  7986. +
  7987. +/* start host controller */
  7988. +static int crisv10_hcd_start(struct usb_hcd *hcd)
  7989. +{
  7990. + DBFENTER;
  7991. + hcd_dbg(hcd, "start\n");
  7992. +
  7993. + crisv10_ready_wait();
  7994. +
  7995. + /* Start processing of USB traffic. */
  7996. + *R_USB_COMMAND =
  7997. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  7998. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  7999. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
  8000. +
  8001. + nop();
  8002. +
  8003. + hcd->state = HC_STATE_RUNNING;
  8004. +
  8005. + DBFEXIT;
  8006. + return 0;
  8007. +}
  8008. +
  8009. +/* stop host controller */
  8010. +static void crisv10_hcd_stop(struct usb_hcd *hcd)
  8011. +{
  8012. + DBFENTER;
  8013. + hcd_dbg(hcd, "stop\n");
  8014. + crisv10_hcd_reset(hcd);
  8015. + DBFEXIT;
  8016. +}
  8017. +
  8018. +/* return the current frame number */
  8019. +static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
  8020. +{
  8021. + DBFENTER;
  8022. + DBFEXIT;
  8023. + return (*R_USB_FM_NUMBER & 0x7ff);
  8024. +}
  8025. +
  8026. +#ifdef CONFIG_USB_OTG
  8027. +
  8028. +static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
  8029. +{
  8030. + return 0; /* no-op for now */
  8031. +}
  8032. +
  8033. +#endif /* CONFIG_USB_OTG */
  8034. +
  8035. +
  8036. +/******************************************************************/
  8037. +/* Root Hub functions */
  8038. +/******************************************************************/
  8039. +
  8040. +/* root hub status */
  8041. +static const struct usb_hub_status rh_hub_status =
  8042. + {
  8043. + .wHubStatus = 0,
  8044. + .wHubChange = 0,
  8045. + };
  8046. +
  8047. +/* root hub descriptor */
  8048. +static const u8 rh_hub_descr[] =
  8049. + {
  8050. + 0x09, /* bDescLength */
  8051. + 0x29, /* bDescriptorType */
  8052. + USB_ROOT_HUB_PORTS, /* bNbrPorts */
  8053. + 0x00, /* wHubCharacteristics */
  8054. + 0x00,
  8055. + 0x01, /* bPwrOn2pwrGood */
  8056. + 0x00, /* bHubContrCurrent */
  8057. + 0x00, /* DeviceRemovable */
  8058. + 0xff /* PortPwrCtrlMask */
  8059. + };
  8060. +
  8061. +/* Actual holder of root hub status */
  8062. +struct crisv10_rh rh;
  8063. +
  8064. +/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
  8065. +int rh_init(void) {
  8066. + int i;
  8067. + /* Reset port status flags */
  8068. + for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
  8069. + rh.wPortChange[i] = 0;
  8070. + rh.wPortStatusPrev[i] = 0;
  8071. + }
  8072. + return 0;
  8073. +}
  8074. +
  8075. +#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
  8076. + (1<<USB_PORT_FEAT_ENABLE)|\
  8077. + (1<<USB_PORT_FEAT_SUSPEND)|\
  8078. + (1<<USB_PORT_FEAT_RESET))
  8079. +
  8080. +/* Handle port status change interrupt (called from bottom part interrupt) */
  8081. +void rh_port_status_change(__u16 port_reg[]) {
  8082. + int i;
  8083. + __u16 wChange;
  8084. +
  8085. + for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
  8086. + /* Xor out changes since last read, masked for important flags */
  8087. + wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
  8088. + /* Or changes together with (if any) saved changes */
  8089. + rh.wPortChange[i] |= wChange;
  8090. + /* Save new status */
  8091. + rh.wPortStatusPrev[i] = port_reg[i];
  8092. +
  8093. + if(wChange) {
  8094. + rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
  8095. + port_status_to_str(wChange),
  8096. + port_status_to_str(port_reg[i]));
  8097. + }
  8098. + }
  8099. +}
  8100. +
  8101. +/* Construct port status change bitmap for the root hub */
  8102. +static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
  8103. +{
  8104. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  8105. + unsigned int i;
  8106. +
  8107. +// DBFENTER;
  8108. +
  8109. + /*
  8110. + * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
  8111. + * return bitmap indicating ports with status change
  8112. + */
  8113. + *buf = 0;
  8114. + spin_lock(&crisv10_hcd->lock);
  8115. + for (i = 1; i <= crisv10_hcd->num_ports; i++) {
  8116. + if (rh.wPortChange[map_port(i)]) {
  8117. + *buf |= (1 << i);
  8118. + rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
  8119. + port_status_to_str(rh.wPortChange[map_port(i)]),
  8120. + port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
  8121. + }
  8122. + }
  8123. + spin_unlock(&crisv10_hcd->lock);
  8124. +
  8125. +// DBFEXIT;
  8126. +
  8127. + return *buf == 0 ? 0 : 1;
  8128. +}
  8129. +
  8130. +/* Handle a control request for the root hub (called from hcd_driver) */
  8131. +static int rh_control_request(struct usb_hcd *hcd,
  8132. + u16 typeReq,
  8133. + u16 wValue,
  8134. + u16 wIndex,
  8135. + char *buf,
  8136. + u16 wLength) {
  8137. +
  8138. + struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  8139. + int retval = 0;
  8140. + int len;
  8141. + DBFENTER;
  8142. +
  8143. + switch (typeReq) {
  8144. + case GetHubDescriptor:
  8145. + rh_dbg("GetHubDescriptor\n");
  8146. + len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
  8147. + memcpy(buf, rh_hub_descr, len);
  8148. + buf[2] = crisv10_hcd->num_ports;
  8149. + break;
  8150. + case GetHubStatus:
  8151. + rh_dbg("GetHubStatus\n");
  8152. + len = min_t(unsigned int, sizeof rh_hub_status, wLength);
  8153. + memcpy(buf, &rh_hub_status, len);
  8154. + break;
  8155. + case GetPortStatus:
  8156. + if (!wIndex || wIndex > crisv10_hcd->num_ports)
  8157. + goto error;
  8158. + rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
  8159. + port_status_to_str(rh.wPortChange[map_port(wIndex)]),
  8160. + port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
  8161. + *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
  8162. + *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
  8163. + break;
  8164. + case SetHubFeature:
  8165. + rh_dbg("SetHubFeature\n");
  8166. + case ClearHubFeature:
  8167. + rh_dbg("ClearHubFeature\n");
  8168. + switch (wValue) {
  8169. + case C_HUB_OVER_CURRENT:
  8170. + case C_HUB_LOCAL_POWER:
  8171. + rh_warn("Not implemented hub request:%d \n", typeReq);
  8172. + /* not implemented */
  8173. + break;
  8174. + default:
  8175. + goto error;
  8176. + }
  8177. + break;
  8178. + case SetPortFeature:
  8179. + if (!wIndex || wIndex > crisv10_hcd->num_ports)
  8180. + goto error;
  8181. + if(rh_set_port_feature(map_port(wIndex), wValue))
  8182. + goto error;
  8183. + break;
  8184. + case ClearPortFeature:
  8185. + if (!wIndex || wIndex > crisv10_hcd->num_ports)
  8186. + goto error;
  8187. + if(rh_clear_port_feature(map_port(wIndex), wValue))
  8188. + goto error;
  8189. + break;
  8190. + default:
  8191. + rh_warn("Unknown hub request: %d\n", typeReq);
  8192. + error:
  8193. + retval = -EPIPE;
  8194. + }
  8195. + DBFEXIT;
  8196. + return retval;
  8197. +}
  8198. +
  8199. +int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
  8200. + __u8 bUsbCommand = 0;
  8201. + switch(wFeature) {
  8202. + case USB_PORT_FEAT_RESET:
  8203. + rh_dbg("SetPortFeature: reset\n");
  8204. + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
  8205. + goto set;
  8206. + break;
  8207. + case USB_PORT_FEAT_SUSPEND:
  8208. + rh_dbg("SetPortFeature: suspend\n");
  8209. + bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
  8210. + goto set;
  8211. + break;
  8212. + case USB_PORT_FEAT_POWER:
  8213. + rh_dbg("SetPortFeature: power\n");
  8214. + break;
  8215. + case USB_PORT_FEAT_C_CONNECTION:
  8216. + rh_dbg("SetPortFeature: c_connection\n");
  8217. + break;
  8218. + case USB_PORT_FEAT_C_RESET:
  8219. + rh_dbg("SetPortFeature: c_reset\n");
  8220. + break;
  8221. + case USB_PORT_FEAT_C_OVER_CURRENT:
  8222. + rh_dbg("SetPortFeature: c_over_current\n");
  8223. + break;
  8224. +
  8225. + set:
  8226. + /* Select which port via the port_sel field */
  8227. + bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
  8228. +
  8229. + /* Make sure the controller isn't busy. */
  8230. + crisv10_ready_wait();
  8231. + /* Send out the actual command to the USB controller */
  8232. + *R_USB_COMMAND = bUsbCommand;
  8233. +
  8234. + /* If port reset then also bring USB controller into running state */
  8235. + if(wFeature == USB_PORT_FEAT_RESET) {
  8236. + /* Wait a while for controller to first become started after port reset */
  8237. + udelay(12000); /* 12ms blocking wait */
  8238. +
  8239. + /* Make sure the controller isn't busy. */
  8240. + crisv10_ready_wait();
  8241. +
  8242. + /* If all enabled ports were disabled the host controller goes down into
  8243. + started mode, so we need to bring it back into the running state.
  8244. + (This is safe even if it's already in the running state.) */
  8245. + *R_USB_COMMAND =
  8246. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  8247. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  8248. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
  8249. + }
  8250. +
  8251. + break;
  8252. + default:
  8253. + rh_dbg("SetPortFeature: unknown feature\n");
  8254. + return -1;
  8255. + }
  8256. + return 0;
  8257. +}
  8258. +
  8259. +int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
  8260. + switch(wFeature) {
  8261. + case USB_PORT_FEAT_ENABLE:
  8262. + rh_dbg("ClearPortFeature: enable\n");
  8263. + rh_disable_port(bPort);
  8264. + break;
  8265. + case USB_PORT_FEAT_SUSPEND:
  8266. + rh_dbg("ClearPortFeature: suspend\n");
  8267. + break;
  8268. + case USB_PORT_FEAT_POWER:
  8269. + rh_dbg("ClearPortFeature: power\n");
  8270. + break;
  8271. +
  8272. + case USB_PORT_FEAT_C_ENABLE:
  8273. + rh_dbg("ClearPortFeature: c_enable\n");
  8274. + goto clear;
  8275. + case USB_PORT_FEAT_C_SUSPEND:
  8276. + rh_dbg("ClearPortFeature: c_suspend\n");
  8277. + goto clear;
  8278. + case USB_PORT_FEAT_C_CONNECTION:
  8279. + rh_dbg("ClearPortFeature: c_connection\n");
  8280. + goto clear;
  8281. + case USB_PORT_FEAT_C_OVER_CURRENT:
  8282. + rh_dbg("ClearPortFeature: c_over_current\n");
  8283. + goto clear;
  8284. + case USB_PORT_FEAT_C_RESET:
  8285. + rh_dbg("ClearPortFeature: c_reset\n");
  8286. + goto clear;
  8287. + clear:
  8288. + rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
  8289. + break;
  8290. + default:
  8291. + rh_dbg("ClearPortFeature: unknown feature\n");
  8292. + return -1;
  8293. + }
  8294. + return 0;
  8295. +}
  8296. +
  8297. +
  8298. +#ifdef CONFIG_PM
  8299. +/* Handle a suspend request for the root hub (called from hcd_driver) */
  8300. +static int rh_suspend_request(struct usb_hcd *hcd)
  8301. +{
  8302. + return 0; /* no-op for now */
  8303. +}
  8304. +
  8305. +/* Handle a resume request for the root hub (called from hcd_driver) */
  8306. +static int rh_resume_request(struct usb_hcd *hcd)
  8307. +{
  8308. + return 0; /* no-op for now */
  8309. +}
  8310. +#endif /* CONFIG_PM */
  8311. +
  8312. +
  8313. +
  8314. +/* Wrapper function for workaround port disable registers in USB controller */
  8315. +static void rh_disable_port(unsigned int port) {
  8316. + volatile int timeout = 10000;
  8317. + volatile char* usb_portx_disable;
  8318. + switch(port) {
  8319. + case 0:
  8320. + usb_portx_disable = R_USB_PORT1_DISABLE;
  8321. + break;
  8322. + case 1:
  8323. + usb_portx_disable = R_USB_PORT2_DISABLE;
  8324. + break;
  8325. + default:
  8326. + /* Invalid port index */
  8327. + return;
  8328. + }
  8329. + /* Set disable flag in special register */
  8330. + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  8331. + /* Wait until not enabled anymore */
  8332. + while((rh.wPortStatusPrev[port] &
  8333. + IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
  8334. + (timeout-- > 0));
  8335. + if(timeout == 0) {
  8336. + warn("Timeout while waiting for port %d to become disabled\n", port);
  8337. + }
  8338. + /* clear disable flag in special register */
  8339. + *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  8340. + rh_info("Physical port %d disabled\n", port+1);
  8341. +}
  8342. +
  8343. +
  8344. +/******************************************************************/
  8345. +/* Transfer Controller (TC) functions */
  8346. +/******************************************************************/
  8347. +
  8348. +/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
  8349. + dynamically?
  8350. + To adjust it dynamically we would have to get an interrupt when we reach
  8351. + the end of the rx descriptor list, or when we get close to the end, and
  8352. + then allocate more descriptors. */
  8353. +#define NBR_OF_RX_DESC 512
  8354. +#define RX_DESC_BUF_SIZE 1024
  8355. +#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
  8356. +
  8357. +
  8358. +/* Local variables for Transfer Controller */
  8359. +/* --------------------------------------- */
  8360. +
  8361. +/* This is a circular (double-linked) list of the active urbs for each epid.
  8362. + The head is never removed, and new urbs are linked onto the list as
  8363. + urb_entry_t elements. Don't reference urb_list directly; use the wrapper
  8364. + functions instead (which includes spin_locks) */
  8365. +static struct list_head urb_list[NBR_OF_EPIDS];
  8366. +
  8367. +/* Read about the need and usage of this lock in submit_ctrl_urb. */
  8368. +/* Lock for URB lists for each EPID */
  8369. +static spinlock_t urb_list_lock;
  8370. +
  8371. +/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
  8372. +static spinlock_t etrax_epid_lock;
  8373. +
  8374. +/* Lock for dma8 sub0 handling */
  8375. +static spinlock_t etrax_dma8_sub0_lock;
  8376. +
  8377. +/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
  8378. + Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be
  8379. + cache aligned. */
  8380. +static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
  8381. +static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
  8382. +
  8383. +/* Pointers into RxDescList. */
  8384. +static volatile struct USB_IN_Desc *myNextRxDesc;
  8385. +static volatile struct USB_IN_Desc *myLastRxDesc;
  8386. +
  8387. +/* A zout transfer makes a memory access at the address of its buf pointer,
  8388. + which means that setting this buf pointer to 0 will cause an access to the
  8389. + flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
  8390. + (depending on DMA burst size) transfer.
  8391. + Instead, we set it to 1, and point it to this buffer. */
  8392. +static int zout_buffer[4] __attribute__ ((aligned (4)));
  8393. +
  8394. +/* Cache for allocating new EP and SB descriptors. */
  8395. +//static kmem_cache_t *usb_desc_cache;
  8396. +static struct kmem_cache *usb_desc_cache;
  8397. +
  8398. +/* Cache for the data allocated in the isoc descr top half. */
  8399. +//static kmem_cache_t *isoc_compl_cache;
  8400. +static struct kmem_cache *isoc_compl_cache;
  8401. +
  8402. +/* Cache for the data allocated when delayed finishing of URBs */
  8403. +//static kmem_cache_t *later_data_cache;
  8404. +static struct kmem_cache *later_data_cache;
  8405. +
  8406. +/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
  8407. + and disable iso_eof interrupt. We only need these interrupts when we have
  8408. + Isoc data endpoints (consumes CPU cycles).
  8409. + FIXME: This could be more fine granular, so this interrupt is only enabled
  8410. + when we have an In Isoc URB without the URB_ISO_ASAP flag queued. */
  8411. +static int isoc_epid_counter;
  8412. +
  8413. +/* Protecting wrapper functions for R_USB_EPT_x */
  8414. +/* -------------------------------------------- */
  8415. +static inline void etrax_epid_set(__u8 index, __u32 data) {
  8416. + unsigned long flags;
  8417. + spin_lock_irqsave(&etrax_epid_lock, flags);
  8418. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  8419. + nop();
  8420. + *R_USB_EPT_DATA = data;
  8421. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  8422. +}
  8423. +
  8424. +static inline void etrax_epid_clear_error(__u8 index) {
  8425. + unsigned long flags;
  8426. + spin_lock_irqsave(&etrax_epid_lock, flags);
  8427. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  8428. + nop();
  8429. + *R_USB_EPT_DATA &=
  8430. + ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
  8431. + IO_MASK(R_USB_EPT_DATA, error_count_out) |
  8432. + IO_MASK(R_USB_EPT_DATA, error_code));
  8433. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  8434. +}
  8435. +
  8436. +static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
  8437. + __u8 toggle) {
  8438. + unsigned long flags;
  8439. + spin_lock_irqsave(&etrax_epid_lock, flags);
  8440. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  8441. + nop();
  8442. + if(dirout) {
  8443. + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
  8444. + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
  8445. + } else {
  8446. + *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
  8447. + *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
  8448. + }
  8449. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  8450. +}
  8451. +
  8452. +static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
  8453. + unsigned long flags;
  8454. + __u8 toggle;
  8455. + spin_lock_irqsave(&etrax_epid_lock, flags);
  8456. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  8457. + nop();
  8458. + if (dirout) {
  8459. + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
  8460. + } else {
  8461. + toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
  8462. + }
  8463. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  8464. + return toggle;
  8465. +}
  8466. +
  8467. +
  8468. +static inline __u32 etrax_epid_get(__u8 index) {
  8469. + unsigned long flags;
  8470. + __u32 data;
  8471. + spin_lock_irqsave(&etrax_epid_lock, flags);
  8472. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  8473. + nop();
  8474. + data = *R_USB_EPT_DATA;
  8475. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  8476. + return data;
  8477. +}
  8478. +
  8479. +
  8480. +
  8481. +
  8482. +/* Main functions for Transfer Controller */
  8483. +/* -------------------------------------- */
  8484. +
  8485. +/* Init structs, memories and lists used by Transfer Controller */
  8486. +int tc_init(struct usb_hcd *hcd) {
  8487. + int i;
  8488. + /* Clear software state info for all epids */
  8489. + memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
  8490. +
  8491. + /* Set Invalid and Dummy as being in use and disabled */
  8492. + epid_state[INVALID_EPID].inuse = 1;
  8493. + epid_state[DUMMY_EPID].inuse = 1;
  8494. + epid_state[INVALID_EPID].disabled = 1;
  8495. + epid_state[DUMMY_EPID].disabled = 1;
  8496. +
  8497. + /* Clear counter for how many Isoc epids we have set up */
  8498. + isoc_epid_counter = 0;
  8499. +
  8500. + /* Initialize the urb list by initiating a head for each list.
  8501. + Also reset the list holding the active URB for each epid */
  8502. + for (i = 0; i < NBR_OF_EPIDS; i++) {
  8503. + INIT_LIST_HEAD(&urb_list[i]);
  8504. + activeUrbList[i] = NULL;
  8505. + }
  8506. +
  8507. + /* Init lock for URB lists */
  8508. + spin_lock_init(&urb_list_lock);
  8509. + /* Init lock for Etrax R_USB_EPT register */
  8510. + spin_lock_init(&etrax_epid_lock);
  8511. + /* Init lock for Etrax dma8 sub0 handling */
  8512. + spin_lock_init(&etrax_dma8_sub0_lock);
  8513. +
  8514. + /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
  8515. +
  8516. + /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
  8517. + allocate SB descriptors from this cache. This is ok since
  8518. + sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
  8519. +// usb_desc_cache = kmem_cache_create("usb_desc_cache",
  8520. +// sizeof(struct USB_EP_Desc), 0,
  8521. +// SLAB_HWCACHE_ALIGN, 0, 0);
  8522. + usb_desc_cache = kmem_cache_create(
  8523. + "usb_desc_cache",
  8524. + sizeof(struct USB_EP_Desc),
  8525. + 0,
  8526. + SLAB_HWCACHE_ALIGN,
  8527. + NULL);
  8528. + if(usb_desc_cache == NULL) {
  8529. + return -ENOMEM;
  8530. + }
  8531. +
  8532. + /* Create slab cache for speedy allocation of memory for isoc bottom-half
  8533. + interrupt handling */
  8534. +// isoc_compl_cache =
  8535. +// kmem_cache_create("isoc_compl_cache",
  8536. +// sizeof(struct crisv10_isoc_complete_data),
  8537. +// 0, SLAB_HWCACHE_ALIGN, 0, 0);
  8538. + isoc_compl_cache = kmem_cache_create(
  8539. + "isoc_compl_cache",
  8540. + sizeof(struct crisv10_isoc_complete_data),
  8541. + 0,
  8542. + SLAB_HWCACHE_ALIGN,
  8543. + NULL
  8544. + );
  8545. +
  8546. + if(isoc_compl_cache == NULL) {
  8547. + return -ENOMEM;
  8548. + }
  8549. +
  8550. + /* Create slab cache for speedy allocation of memory for later URB finish
  8551. + struct */
  8552. +// later_data_cache =
  8553. +// kmem_cache_create("later_data_cache",
  8554. +// sizeof(struct urb_later_data),
  8555. +// 0, SLAB_HWCACHE_ALIGN, 0, 0);
  8556. +
  8557. + later_data_cache = kmem_cache_create(
  8558. + "later_data_cache",
  8559. + sizeof(struct urb_later_data),
  8560. + 0,
  8561. + SLAB_HWCACHE_ALIGN,
  8562. + NULL
  8563. + );
  8564. +
  8565. + if(later_data_cache == NULL) {
  8566. + return -ENOMEM;
  8567. + }
  8568. +
  8569. +
  8570. + /* Initiate the bulk start timer. */
  8571. + init_timer(&bulk_start_timer);
  8572. + bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
  8573. + bulk_start_timer.function = tc_bulk_start_timer_func;
  8574. + add_timer(&bulk_start_timer);
  8575. +
  8576. +
  8577. + /* Initiate the bulk eot timer. */
  8578. + init_timer(&bulk_eot_timer);
  8579. + bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
  8580. + bulk_eot_timer.function = tc_bulk_eot_timer_func;
  8581. + bulk_eot_timer.data = (unsigned long)hcd;
  8582. + add_timer(&bulk_eot_timer);
  8583. +
  8584. + return 0;
  8585. +}
  8586. +
  8587. +/* Uninitialize all resources used by Transfer Controller */
  8588. +void tc_destroy(void) {
  8589. +
  8590. + /* Destroy all slab cache */
  8591. + kmem_cache_destroy(usb_desc_cache);
  8592. + kmem_cache_destroy(isoc_compl_cache);
  8593. + kmem_cache_destroy(later_data_cache);
  8594. +
  8595. + /* Remove timers */
  8596. + del_timer(&bulk_start_timer);
  8597. + del_timer(&bulk_eot_timer);
  8598. +}
  8599. +
  8600. +static void restart_dma8_sub0(void) {
  8601. + unsigned long flags;
  8602. + spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
  8603. + /* Verify that the dma is not running */
  8604. + if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
  8605. + struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
  8606. + while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
  8607. + ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
  8608. + }
  8609. + /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID.
  8610. + * ep->next is already a physical address. virt_to_phys is needed, see
  8611. + * http://mhonarc.axis.se/dev-etrax/msg08630.html
  8612. + */
  8613. + //*R_DMA_CH8_SUB0_EP = ep->next;
  8614. + *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
  8615. + /* Restart the DMA */
  8616. + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
  8617. + }
  8618. + spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
  8619. +}
  8620. +
  8621. +/* queue an URB with the transfer controller (called from hcd_driver) */
  8622. +//static int tc_urb_enqueue(struct usb_hcd *hcd,
  8623. +// struct usb_host_endpoint *ep,
  8624. +// struct urb *urb,
  8625. +// gfp_t mem_flags) {
  8626. +static int tc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
  8627. +{
  8628. + int epid;
  8629. + int retval;
  8630. +// int bustime = 0;
  8631. + int maxpacket;
  8632. + unsigned long flags;
  8633. + struct crisv10_urb_priv *urb_priv;
  8634. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  8635. + DBFENTER;
  8636. +
  8637. + if(!(crisv10_hcd->running)) {
  8638. + /* The USB Controller is not running, probably because no device is
  8639. + attached. No point in enqueueing URBs then */
  8640. + tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
  8641. + (unsigned int)urb);
  8642. + return -ENOENT;
  8643. + }
  8644. +
  8645. + maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  8646. +
  8647. + /* hinko ignore usb_pipeisoc */
  8648. +#if 0
  8649. + /* Special case check for In Isoc transfers. Specification states that each
  8650. + In Isoc transfer consists of one packet and therefore it should fit into
  8651. + the transfer-buffer of an URB.
  8652. + We do the check here to be sure (an invalid scenario can be produced with
  8653. + parameters to the usbtest suite) */
  8654. + if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
  8655. + (urb->transfer_buffer_length < maxpacket)) {
  8656. + tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
  8657. + return -EMSGSIZE;
  8658. + }
  8659. +
  8660. + /* Check if there is enough bandwidth for periodic transfer */
  8661. + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
  8662. + /* only check (and later claim) if not already claimed */
  8663. + if (urb->bandwidth == 0) {
  8664. + bustime = usb_check_bandwidth(urb->dev, urb);
  8665. + if (bustime < 0) {
  8666. + tc_err("Not enough periodic bandwidth\n");
  8667. + return -ENOSPC;
  8668. + }
  8669. + }
  8670. + }
  8671. +#endif
  8672. +
  8673. + /* Check if there is a epid for URBs destination, if not this function
  8674. + set up one. */
  8675. + //epid = tc_setup_epid(ep, urb, mem_flags);
  8676. + epid = tc_setup_epid(urb, mem_flags);
  8677. + if (epid < 0) {
  8678. + tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
  8679. + DBFEXIT;
  8680. + return -ENOMEM;
  8681. + }
  8682. +
  8683. + if(urb == activeUrbList[epid]) {
  8684. + tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
  8685. + return -ENXIO;
  8686. + }
  8687. +
  8688. + if(urb_list_entry(urb, epid)) {
  8689. + tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
  8690. + return -ENXIO;
  8691. + }
  8692. +
  8693. + /* If we have actively flagged the endpoint as disabled then refuse submission */
  8694. + if(epid_state[epid].disabled) {
  8695. + return -ENOENT;
  8696. + }
  8697. +
  8698. + /* Allocate and init HC-private data for URB */
  8699. + if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
  8700. + DBFEXIT;
  8701. + return -ENOMEM;
  8702. + }
  8703. + urb_priv = urb->hcpriv;
  8704. +
  8705. + tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
  8706. + (unsigned int)urb, urb_priv->urb_num, epid,
  8707. + pipe_to_str(urb->pipe), urb->transfer_buffer_length);
  8708. +
  8709. + /* Create and link SBs required for this URB */
  8710. + retval = create_sb_for_urb(urb, mem_flags);
  8711. + if(retval != 0) {
  8712. + tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
  8713. + urb_priv->urb_num);
  8714. + urb_priv_free(hcd, urb);
  8715. + DBFEXIT;
  8716. + return retval;
  8717. + }
  8718. +
  8719. + /* Init intr EP pool if this URB is an INTR transfer. This pool is later
  8720. + used when inserting EPs in the TxIntrEPList. We do the alloc here
  8721. + so we can't run out of memory later */
  8722. + if(usb_pipeint(urb->pipe)) {
  8723. + retval = init_intr_urb(urb, mem_flags);
  8724. + if(retval != 0) {
  8725. + tc_warn("Failed to init Intr URB\n");
  8726. + urb_priv_free(hcd, urb);
  8727. + DBFEXIT;
  8728. + return retval;
  8729. + }
  8730. + }
  8731. +
  8732. + /* Disable other access when inserting USB */
  8733. +
  8734. + /* BUG on sleeping inside int disabled if using local_irq_save/local_irq_restore
  8735. + * here - because urb_list_add() and tc_dma_process_queue() save irqs again !??!
  8736. + */
  8737. +// local_irq_save(flags);
  8738. +
  8739. + /* hinko ignore usb_pipeisoc */
  8740. +#if 0
  8741. + /* Claim bandwidth, if needed */
  8742. + if(bustime) {
  8743. + usb_claim_bandwidth(urb->dev, urb, bustime, 0);
  8744. + }
  8745. +
  8746. + /* Add URB to EP queue */
  8747. + urb_list_add(urb, epid, mem_flags);
  8748. +
  8749. + if(usb_pipeisoc(urb->pipe)) {
  8750. + /* Special processing of Isoc URBs. */
  8751. + tc_dma_process_isoc_urb(urb);
  8752. + } else {
  8753. + /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
  8754. + tc_dma_process_queue(epid);
  8755. + }
  8756. +#endif
  8757. + /* Add URB to EP queue */
  8758. + urb_list_add(urb, epid, mem_flags);
  8759. +
  8760. + /*hinko link/unlink urb -> ep */
  8761. + spin_lock_irqsave(&crisv10_hcd->lock, flags);
  8762. + //spin_lock(&crisv10_hcd->lock);
  8763. + retval = usb_hcd_link_urb_to_ep(hcd, urb);
  8764. + if (retval) {
  8765. + spin_unlock_irqrestore(&crisv10_hcd->lock, flags);
  8766. + tc_warn("Failed to link urb to ep\n");
  8767. + urb_priv_free(hcd, urb);
  8768. + DBFEXIT;
  8769. + return retval;
  8770. + }
  8771. + spin_unlock_irqrestore(&crisv10_hcd->lock, flags);
  8772. + //spin_unlock(&crisv10_hcd->lock);
  8773. +
  8774. + /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
  8775. + tc_dma_process_queue(epid);
  8776. +
  8777. +// local_irq_restore(flags);
  8778. +
  8779. + DBFEXIT;
  8780. + return 0;
  8781. +}
  8782. +
  8783. +/* remove an URB from the transfer controller queues (called from hcd_driver)*/
  8784. +//static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
  8785. +static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  8786. +{
  8787. + struct crisv10_urb_priv *urb_priv;
  8788. + unsigned long flags;
  8789. + int epid;
  8790. +
  8791. + DBFENTER;
  8792. + /* Disable interrupts here since a descriptor interrupt for the isoc epid
  8793. + will modify the sb list. This could possibly be done more granular, but
  8794. + urb_dequeue should not be used frequently anyway.
  8795. + */
  8796. + local_irq_save(flags);
  8797. +
  8798. + urb_priv = urb->hcpriv;
  8799. +
  8800. + if (!urb_priv) {
  8801. + /* This happens if a device driver calls unlink on an urb that
  8802. + was never submitted (lazy driver) or if the urb was completed
  8803. + while dequeue was being called. */
  8804. + tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
  8805. + local_irq_restore(flags);
  8806. + return 0;
  8807. + }
  8808. + epid = urb_priv->epid;
  8809. +
  8810. + tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
  8811. + (urb == activeUrbList[epid]) ? "active" : "queued",
  8812. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  8813. + str_type(urb->pipe), epid, urb->status,
  8814. + (urb_priv->later_data) ? "later-sched" : "");
  8815. +
  8816. + /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
  8817. + that isn't active can be dequeued by just removing it from the queue */
  8818. + if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
  8819. + usb_pipeint(urb->pipe)) {
  8820. +
  8821. + /* Check if the URB hasn't gone further than the queue */
  8822. + if(urb != activeUrbList[epid]) {
  8823. + ASSERT(urb_priv->later_data == NULL);
  8824. + tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
  8825. + " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
  8826. + str_dir(urb->pipe), str_type(urb->pipe), epid);
  8827. +
  8828. + /* Finish the URB with error status from USB core */
  8829. + tc_finish_urb(hcd, urb, urb->status);
  8830. + local_irq_restore(flags);
  8831. + return 0;
  8832. + }
  8833. + }
  8834. +
  8835. + /* Set URB status to Unlink for handling when interrupt comes. */
  8836. + urb_priv->urb_state = UNLINK;
  8837. +
  8838. + /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
  8839. + switch(usb_pipetype(urb->pipe)) {
  8840. + case PIPE_BULK:
  8841. + /* Check if EP still is enabled */
  8842. + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  8843. + /* The EP was enabled, disable it. */
  8844. + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  8845. + }
  8846. + /* Kicking dummy list out of the party. */
  8847. + TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
  8848. + break;
  8849. + case PIPE_CONTROL:
  8850. + /* Check if EP still is enabled */
  8851. + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  8852. + /* The EP was enabled, disable it. */
  8853. + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  8854. + }
  8855. + break;
  8856. + case PIPE_ISOCHRONOUS:
  8857. + /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
  8858. + finish_isoc_urb(), because there might be the case where the URB is dequeued
  8859. + but there are other valid URBs waiting */
  8860. +
  8861. + /* Check if In Isoc EP still is enabled */
  8862. + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  8863. + /* The EP was enabled, disable it. */
  8864. + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  8865. + }
  8866. + break;
  8867. + case PIPE_INTERRUPT:
  8868. + /* Special care is taken for interrupt URBs. EPs are unlinked in
  8869. + tc_finish_urb */
  8870. + break;
  8871. + default:
  8872. + break;
  8873. + }
  8874. +
  8875. + /* Asynchronous unlink, finish the URB later from scheduled or other
  8876. + event (data finished, error) */
  8877. + tc_finish_urb_later(hcd, urb, urb->status);
  8878. +
  8879. + local_irq_restore(flags);
  8880. + DBFEXIT;
  8881. + return 0;
  8882. +}
  8883. +
  8884. +
  8885. +static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
  8886. + volatile int timeout = 10000;
  8887. + struct urb* urb;
  8888. + struct crisv10_urb_priv* urb_priv;
  8889. + unsigned long flags;
  8890. +
  8891. + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  8892. + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  8893. + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
  8894. +
  8895. + int type = epid_state[epid].type;
  8896. +
  8897. + /* Setting this flag will cause enqueue() to return -ENOENT for new
  8898. + submissions on this endpoint and finish_urb() won't process the queue further */
  8899. + epid_state[epid].disabled = 1;
  8900. +
  8901. + switch(type) {
  8902. + case PIPE_BULK:
  8903. + /* Check if EP still is enabled */
  8904. + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  8905. + /* The EP was enabled, disable it. */
  8906. + TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  8907. + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
  8908. +
  8909. + /* Do busy-wait until DMA not using this EP descriptor anymore */
  8910. + while((*R_DMA_CH8_SUB0_EP ==
  8911. + virt_to_phys(&TxBulkEPList[epid])) &&
  8912. + (timeout-- > 0));
  8913. + if(timeout == 0) {
  8914. + warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
  8915. + " epid:%d\n", epid);
  8916. + }
  8917. + }
  8918. + break;
  8919. +
  8920. + case PIPE_CONTROL:
  8921. + /* Check if EP still is enabled */
  8922. + if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  8923. + /* The EP was enabled, disable it. */
  8924. + TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  8925. + tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
  8926. +
  8927. + /* Do busy-wait until DMA not using this EP descriptor anymore */
  8928. + while((*R_DMA_CH8_SUB1_EP ==
  8929. + virt_to_phys(&TxCtrlEPList[epid])) &&
  8930. + (timeout-- > 0));
  8931. + if(timeout == 0) {
  8932. + warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
  8933. + " epid:%d\n", epid);
  8934. + }
  8935. + }
  8936. + break;
  8937. +
  8938. + case PIPE_INTERRUPT:
  8939. + local_irq_save(flags);
  8940. + /* Disable all Intr EPs belonging to epid */
  8941. + first_ep = &TxIntrEPList[0];
  8942. + curr_ep = first_ep;
  8943. + do {
  8944. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  8945. + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
  8946. + /* Disable EP */
  8947. + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
  8948. + }
  8949. + curr_ep = phys_to_virt(curr_ep->next);
  8950. + } while (curr_ep != first_ep);
  8951. +
  8952. + local_irq_restore(flags);
  8953. + break;
  8954. +
  8955. + case PIPE_ISOCHRONOUS:
  8956. + /* Check if EP still is enabled */
  8957. + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  8958. + tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
  8959. + /* The EP was enabled, disable it. */
  8960. + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  8961. +
  8962. + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
  8963. + (timeout-- > 0));
  8964. + if(timeout == 0) {
  8965. + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
  8966. + " epid:%d\n", epid);
  8967. + }
  8968. + }
  8969. + break;
  8970. + }
  8971. +
  8972. + local_irq_save(flags);
  8973. +
  8974. + /* Finish if there is active URB for this endpoint */
  8975. + if(activeUrbList[epid] != NULL) {
  8976. + urb = activeUrbList[epid];
  8977. + urb_priv = urb->hcpriv;
  8978. + ASSERT(urb_priv);
  8979. + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
  8980. + (urb == activeUrbList[epid]) ? "active" : "queued",
  8981. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  8982. + str_type(urb->pipe), epid, urb->status,
  8983. + (urb_priv->later_data) ? "later-sched" : "");
  8984. +
  8985. + tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
  8986. + ASSERT(activeUrbList[epid] == NULL);
  8987. + }
  8988. +
  8989. + /* Finish any queued URBs for this endpoint. There won't be any resubmissions
  8990. + because epid_disabled causes enqueue() to fail for this endpoint */
  8991. + while((urb = urb_list_first(epid)) != NULL) {
  8992. + urb_priv = urb->hcpriv;
  8993. + ASSERT(urb_priv);
  8994. +
  8995. + tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
  8996. + (urb == activeUrbList[epid]) ? "active" : "queued",
  8997. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  8998. + str_type(urb->pipe), epid, urb->status,
  8999. + (urb_priv->later_data) ? "later-sched" : "");
  9000. +
  9001. + tc_finish_urb(hcd, urb, -ENOENT);
  9002. + }
  9003. + epid_state[epid].disabled = 0;
  9004. + local_irq_restore(flags);
  9005. +}
  9006. +
  9007. +/* free resources associated with an endpoint (called from hcd_driver) */
  9008. +static void tc_endpoint_disable(struct usb_hcd *hcd,
  9009. + struct usb_host_endpoint *ep) {
  9010. + DBFENTER;
  9011. + /* Only free epid if it has been allocated. We get two endpoint_disable
  9012. + requests for ctrl endpoints so ignore the second one */
  9013. + if(ep->hcpriv != NULL) {
  9014. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  9015. + int epid = ep_priv->epid;
  9016. + tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
  9017. + (unsigned int)ep, (unsigned int)ep->hcpriv,
  9018. + endpoint_to_str(&(ep->desc)), epid);
  9019. +
  9020. + tc_sync_finish_epid(hcd, epid);
  9021. +
  9022. + ASSERT(activeUrbList[epid] == NULL);
  9023. + ASSERT(list_empty(&urb_list[epid]));
  9024. +
  9025. + tc_free_epid(ep);
  9026. + } else {
  9027. + tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
  9028. + (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
  9029. + }
  9030. + DBFEXIT;
  9031. +}
  9032. +
  9033. +//static void tc_finish_urb_later_proc(void *data) {
  9034. +static void tc_finish_urb_later_proc(struct work_struct *work) {
  9035. + unsigned long flags;
  9036. + //struct urb_later_data* uld = (struct urb_later_data*)data;
  9037. + struct urb_later_data* uld = container_of(work, struct urb_later_data, ws.work);
  9038. + local_irq_save(flags);
  9039. + if(uld->urb == NULL) {
  9040. + late_dbg("Later finish of URB = NULL (allready finished)\n");
  9041. + } else {
  9042. + struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
  9043. + ASSERT(urb_priv);
  9044. + if(urb_priv->urb_num == uld->urb_num) {
  9045. + late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
  9046. + urb_priv->urb_num);
  9047. + if(uld->status != uld->urb->status) {
  9048. + errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
  9049. + uld->urb->status, uld->status);
  9050. + }
  9051. + if(uld != urb_priv->later_data) {
  9052. + panic("Scheduled uld not same as URBs uld\n");
  9053. + }
  9054. + tc_finish_urb(uld->hcd, uld->urb, uld->status);
  9055. + } else {
  9056. + late_warn("Ignoring later finish of URB:0x%x[%d]"
  9057. + ", urb_num doesn't match current URB:0x%x[%d]",
  9058. + (unsigned int)(uld->urb), uld->urb_num,
  9059. + (unsigned int)(uld->urb), urb_priv->urb_num);
  9060. + }
  9061. + }
  9062. + local_irq_restore(flags);
  9063. + kmem_cache_free(later_data_cache, uld);
  9064. +}
  9065. +
  9066. +static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
  9067. + int status) {
  9068. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  9069. + struct urb_later_data* uld;
  9070. +
  9071. + ASSERT(urb_priv);
  9072. +
  9073. + if(urb_priv->later_data != NULL) {
  9074. + /* Later-finish already scheduled for this URB, just update status to
  9075. + return when finishing later */
  9076. + errno_dbg("Later-finish schedule change URB status:%d with new"
  9077. + " status:%d\n", urb_priv->later_data->status, status);
  9078. +
  9079. + urb_priv->later_data->status = status;
  9080. + return;
  9081. + }
  9082. +
  9083. + uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
  9084. + ASSERT(uld);
  9085. +
  9086. + uld->hcd = hcd;
  9087. + uld->urb = urb;
  9088. + uld->urb_num = urb_priv->urb_num;
  9089. + uld->status = status;
  9090. +
  9091. + //INIT_WORK(&uld->ws, tc_finish_urb_later_proc, uld);
  9092. + INIT_DELAYED_WORK(&uld->ws, tc_finish_urb_later_proc);
  9093. + urb_priv->later_data = uld;
  9094. +
  9095. + /* Schedule the finishing of the URB to happen later */
  9096. + schedule_delayed_work(&uld->ws, LATER_TIMER_DELAY);
  9097. +}
  9098. +
  9099. + /* hinko ignore usb_pipeisoc */
  9100. +#if 0
  9101. +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
  9102. + int status);
  9103. +#endif
  9104. +
  9105. +static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  9106. + struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  9107. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  9108. + int epid;
  9109. + char toggle;
  9110. + int urb_num;
  9111. + unsigned long flags;
  9112. +
  9113. + DBFENTER;
  9114. + ASSERT(urb_priv != NULL);
  9115. + epid = urb_priv->epid;
  9116. + urb_num = urb_priv->urb_num;
  9117. +
  9118. + if(urb != activeUrbList[epid]) {
  9119. + if(urb_list_entry(urb, epid)) {
  9120. + /* Remove this URB from the list. Only happens when URBs are finished
  9121. + before having been processed (dequeuing) */
  9122. + urb_list_del(urb, epid);
  9123. + } else {
  9124. + tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
  9125. + " epid:%d\n", (unsigned int)urb, urb_num, epid);
  9126. + }
  9127. + }
  9128. +
  9129. + /* Cancel any pending later-finish of this URB */
  9130. + if(urb_priv->later_data) {
  9131. + urb_priv->later_data->urb = NULL;
  9132. + }
  9133. +
  9134. + /* For an IN pipe, we always set the actual length, regardless of whether
  9135. + there was an error or not (which means the device driver can use the data
  9136. + if it wants to). */
  9137. + if(usb_pipein(urb->pipe)) {
  9138. + urb->actual_length = urb_priv->rx_offset;
  9139. + } else {
  9140. + /* Set actual_length for OUT urbs also; the USB mass storage driver seems
  9141. + to want that. */
  9142. + if (status == 0 && urb->status == -EINPROGRESS) {
  9143. + urb->actual_length = urb->transfer_buffer_length;
  9144. + } else {
  9145. + /* We wouldn't know of any partial writes if there was an error. */
  9146. + urb->actual_length = 0;
  9147. + }
  9148. + }
  9149. +
  9150. +
  9151. + /* URB status mangling */
  9152. + if(urb->status == -EINPROGRESS) {
  9153. + /* The USB core hasn't changed the status, let's set our finish status */
  9154. + urb->status = status;
  9155. +
  9156. + if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
  9157. + usb_pipein(urb->pipe) &&
  9158. + (urb->actual_length != urb->transfer_buffer_length)) {
  9159. + /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
  9160. + max length) is to be treated as an error. */
  9161. + errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
  9162. + " data:%d\n", (unsigned int)urb, urb_num,
  9163. + urb->actual_length);
  9164. + urb->status = -EREMOTEIO;
  9165. + }
  9166. +
  9167. + if(urb_priv->urb_state == UNLINK) {
  9168. + /* URB has been requested to be unlinked asynchronously */
  9169. + urb->status = -ECONNRESET;
  9170. + errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
  9171. + (unsigned int)urb, urb_num, urb->status);
  9172. + }
  9173. + } else {
  9174. + /* The USB Core wants to signal some error via the URB, pass it through */
  9175. + }
  9176. +
  9177. + /* hinko ignore usb_pipeisoc */
  9178. +#if 0
  9179. + /* use completely different finish function for Isoc URBs */
  9180. + if(usb_pipeisoc(urb->pipe)) {
  9181. + tc_finish_isoc_urb(hcd, urb, status);
  9182. + return;
  9183. + }
  9184. +#endif
  9185. +
  9186. + /* Do special unlinking of EPs for Intr traffic */
  9187. + if(usb_pipeint(urb->pipe)) {
  9188. + tc_dma_unlink_intr_urb(urb);
  9189. + }
  9190. +
  9191. + /* hinko ignore usb_pipeisoc */
  9192. +#if 0
  9193. + /* Release allocated bandwidth for periodic transfers */
  9194. + if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
  9195. + usb_release_bandwidth(urb->dev, urb, 0);
  9196. +#endif
  9197. +
  9198. + /* This URB is active on EP */
  9199. + if(urb == activeUrbList[epid]) {
  9200. + /* We need to fiddle with the toggle bits because the hardware doesn't do
  9201. + it for us. */
  9202. + toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
  9203. + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
  9204. + usb_pipeout(urb->pipe), toggle);
  9205. +
  9206. + /* Checks for Ctrl and Bulk EPs */
  9207. + switch(usb_pipetype(urb->pipe)) {
  9208. + case PIPE_BULK:
  9209. + /* Check so Bulk EP really is disabled before finishing active URB */
  9210. + ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
  9211. + IO_STATE(USB_EP_command, enable, no));
  9212. + /* Disable sub-pointer for EP to avoid next tx_interrupt() to
  9213. + process Bulk EP. */
  9214. + TxBulkEPList[epid].sub = 0;
  9215. + /* No need to wait for the DMA before changing the next pointer.
  9216. + The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
  9217. + the last one (INVALID_EPID) for actual traffic. */
  9218. + TxBulkEPList[epid].next =
  9219. + virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
  9220. + break;
  9221. + case PIPE_CONTROL:
  9222. + /* Check so Ctrl EP really is disabled before finishing active URB */
  9223. + ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
  9224. + IO_STATE(USB_EP_command, enable, no));
  9225. + /* Disable sub-pointer for EP to avoid next tx_interrupt() to
  9226. + process Ctrl EP. */
  9227. + TxCtrlEPList[epid].sub = 0;
  9228. + break;
  9229. + }
  9230. + }
  9231. +
  9232. + /* Free HC-private URB data*/
  9233. + urb_priv_free(hcd, urb);
  9234. +
  9235. + if(urb->status) {
  9236. + errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
  9237. + (unsigned int)urb, urb_num, str_dir(urb->pipe),
  9238. + str_type(urb->pipe), urb->actual_length, urb->status);
  9239. + } else {
  9240. + tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
  9241. + (unsigned int)urb, urb_num, str_dir(urb->pipe),
  9242. + str_type(urb->pipe), urb->actual_length, urb->status);
  9243. + }
  9244. +
  9245. + /* If we just finished an active URB, clear active pointer. */
  9246. + if (urb == activeUrbList[epid]) {
  9247. + /* Make URB not active on EP anymore */
  9248. + activeUrbList[epid] = NULL;
  9249. +
  9250. + if(urb->status == 0) {
  9251. + /* URB finished successfully, process queue to see if there are any more
  9252. + URBs waiting before we call completion function.*/
  9253. + if(crisv10_hcd->running) {
  9254. + /* Only process queue if USB controller is running */
  9255. + tc_dma_process_queue(epid);
  9256. + } else {
  9257. + tc_warn("No processing of queue for epid:%d, USB Controller not"
  9258. + " running\n", epid);
  9259. + }
  9260. + }
  9261. + }
  9262. +
  9263. + /* Hand the URB from HCD to its USB device driver, using its completion
  9264. + functions */
  9265. +// usb_hcd_giveback_urb (hcd, urb);
  9266. + /**
  9267. + * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
  9268. + * @hcd: host controller to which @urb was submitted
  9269. + * @urb: URB being unlinked
  9270. + *
  9271. + * Host controller drivers should call this routine before calling
  9272. + * usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
  9273. + * interrupts must be disabled. The actions carried out here are required
  9274. + * for URB completion.
  9275. + */
  9276. +
  9277. + /*hinko link/unlink urb -> ep */
  9278. + //spin_lock(&crisv10_hcd->lock);
  9279. + spin_lock_irqsave(&crisv10_hcd->lock, flags);
  9280. + usb_hcd_unlink_urb_from_ep(hcd, urb);
  9281. + usb_hcd_giveback_urb(hcd, urb, status);
  9282. + //spin_unlock(&crisv10_hcd->lock);
  9283. + spin_unlock_irqrestore(&crisv10_hcd->lock, flags);
  9284. +
  9285. + /* Check the queue once more if the URB returned with error, because we
  9286. + didn't do it before the completion function because the specification
  9287. + states that the queue should not restart until all it's unlinked
  9288. + URBs have been fully retired, with the completion functions run */
  9289. + if(crisv10_hcd->running) {
  9290. + /* Only process queue if USB controller is running */
  9291. + tc_dma_process_queue(epid);
  9292. + } else {
  9293. + tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
  9294. + epid);
  9295. + }
  9296. +
  9297. + DBFEXIT;
  9298. +}
  9299. +
  9300. + /* hinko ignore usb_pipeisoc */
  9301. +#if 0
  9302. +static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
  9303. + int status) {
  9304. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  9305. + int epid, i;
  9306. + volatile int timeout = 10000;
  9307. +
  9308. + ASSERT(urb_priv);
  9309. + epid = urb_priv->epid;
  9310. +
  9311. + ASSERT(usb_pipeisoc(urb->pipe));
  9312. +
  9313. + /* Set that all isoc packets have status and length set before
  9314. + completing the urb. */
  9315. + for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
  9316. + urb->iso_frame_desc[i].actual_length = 0;
  9317. + urb->iso_frame_desc[i].status = -EPROTO;
  9318. + }
  9319. +
  9320. + /* Check if the URB is currently active (done or error) */
  9321. + if(urb == activeUrbList[epid]) {
  9322. + /* Check if there are another In Isoc URB queued for this epid */
  9323. + if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
  9324. + /* Move it from queue to active and mark it started so Isoc transfers
  9325. + won't be interrupted.
  9326. + All Isoc URBs data transfers are already added to DMA lists so we
  9327. + don't have to insert anything in DMA lists here. */
  9328. + activeUrbList[epid] = urb_list_first(epid);
  9329. + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
  9330. + STARTED;
  9331. + urb_list_del(activeUrbList[epid], epid);
  9332. +
  9333. + if(urb->status) {
  9334. + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
  9335. + " status:%d, new waiting URB:0x%x[%d]\n",
  9336. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  9337. + str_type(urb->pipe), urb_priv->isoc_packet_counter,
  9338. + urb->number_of_packets, urb->status,
  9339. + (unsigned int)activeUrbList[epid],
  9340. + ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
  9341. + }
  9342. +
  9343. + } else { /* No other URB queued for this epid */
  9344. + if(urb->status) {
  9345. + errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
  9346. + " status:%d, no new URB waiting\n",
  9347. + (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
  9348. + str_type(urb->pipe), urb_priv->isoc_packet_counter,
  9349. + urb->number_of_packets, urb->status);
  9350. + }
  9351. +
  9352. + /* Check if EP is still enabled, then shut it down. */
  9353. + if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  9354. + isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
  9355. +
  9356. + /* Should only occur for In Isoc EPs where SB isn't consumed. */
  9357. + ASSERT(usb_pipein(urb->pipe));
  9358. +
  9359. + /* Disable it and wait for it to stop */
  9360. + TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
  9361. +
  9362. + /* Ah, the luxury of busy-wait. */
  9363. + while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
  9364. + (timeout-- > 0));
  9365. + if(timeout == 0) {
  9366. + warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
  9367. + }
  9368. + }
  9369. +
  9370. + /* Unlink SB to say that epid is finished. */
  9371. + TxIsocEPList[epid].sub = 0;
  9372. + TxIsocEPList[epid].hw_len = 0;
  9373. +
  9374. + /* No URB active for EP anymore */
  9375. + activeUrbList[epid] = NULL;
  9376. + }
  9377. + } else { /* Finishing of not active URB (queued up with SBs though) */
  9378. + isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
  9379. + " SB queued but not active\n",
  9380. + (unsigned int)urb, str_dir(urb->pipe),
  9381. + urb_priv->isoc_packet_counter, urb->number_of_packets,
  9382. + urb->status);
  9383. + if(usb_pipeout(urb->pipe)) {
  9384. + /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
  9385. + struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
  9386. +
  9387. + iter_sb = TxIsocEPList[epid].sub ?
  9388. + phys_to_virt(TxIsocEPList[epid].sub) : 0;
  9389. + prev_sb = 0;
  9390. +
  9391. + /* SB that is linked before this URBs first SB */
  9392. + while (iter_sb && (iter_sb != urb_priv->first_sb)) {
  9393. + prev_sb = iter_sb;
  9394. + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
  9395. + }
  9396. +
  9397. + if (iter_sb == 0) {
  9398. + /* Unlink of the URB currently being transmitted. */
  9399. + prev_sb = 0;
  9400. + iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
  9401. + }
  9402. +
  9403. + while (iter_sb && (iter_sb != urb_priv->last_sb)) {
  9404. + iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
  9405. + }
  9406. +
  9407. + if (iter_sb) {
  9408. + next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
  9409. + } else {
  9410. + /* This should only happen if the DMA has completed
  9411. + processing the SB list for this EP while interrupts
  9412. + are disabled. */
  9413. + isoc_dbg("Isoc urb not found, already sent?\n");
  9414. + next_sb = 0;
  9415. + }
  9416. + if (prev_sb) {
  9417. + prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
  9418. + } else {
  9419. + TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
  9420. + }
  9421. + }
  9422. + }
  9423. +
  9424. + /* Free HC-private URB data*/
  9425. + urb_priv_free(hcd, urb);
  9426. +
  9427. + usb_release_bandwidth(urb->dev, urb, 0);
  9428. +
  9429. + /* Hand the URB from HCD to its USB device driver, using its completion
  9430. + functions */
  9431. + usb_hcd_giveback_urb (hcd, urb);
  9432. +}
  9433. +#endif
  9434. +
  9435. +static __u32 urb_num = 0;
  9436. +
  9437. +/* allocate and initialize URB private data */
  9438. +static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
  9439. + int mem_flags) {
  9440. + struct crisv10_urb_priv *urb_priv;
  9441. +
  9442. + urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
  9443. + if (!urb_priv)
  9444. + return -ENOMEM;
  9445. + memset(urb_priv, 0, sizeof *urb_priv);
  9446. +
  9447. + urb_priv->epid = epid;
  9448. + urb_priv->urb_state = NOT_STARTED;
  9449. +
  9450. + urb->hcpriv = urb_priv;
  9451. + /* Assign URB a sequence number, and increment counter */
  9452. + urb_priv->urb_num = urb_num;
  9453. + urb_num++;
  9454. + return 0;
  9455. +}
  9456. +
  9457. +/* free URB private data */
  9458. +static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
  9459. + int i;
  9460. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  9461. + ASSERT(urb_priv != 0);
  9462. +
  9463. + /* Check it has any SBs linked that need to be freed */
  9464. + if(urb_priv->first_sb != NULL) {
  9465. + struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
  9466. + int i = 0;
  9467. + first_sb = urb_priv->first_sb;
  9468. + last_sb = urb_priv->last_sb;
  9469. + ASSERT(last_sb);
  9470. + while(first_sb != last_sb) {
  9471. + next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
  9472. + kmem_cache_free(usb_desc_cache, first_sb);
  9473. + first_sb = next_sb;
  9474. + i++;
  9475. + }
  9476. + kmem_cache_free(usb_desc_cache, last_sb);
  9477. + i++;
  9478. + }
  9479. +
  9480. + /* Check if it has any EPs in its Intr pool that also needs to be freed */
  9481. + if(urb_priv->intr_ep_pool_length > 0) {
  9482. + for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
  9483. + kfree(urb_priv->intr_ep_pool[i]);
  9484. + }
  9485. + /*
  9486. + tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
  9487. + urb_priv->intr_ep_pool_length, (unsigned int)urb);
  9488. + */
  9489. + }
  9490. +
  9491. + kfree(urb_priv);
  9492. + urb->hcpriv = NULL;
  9493. +}
  9494. +
  9495. +static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
  9496. + struct crisv10_ep_priv *ep_priv;
  9497. +
  9498. + ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
  9499. + if (!ep_priv)
  9500. + return -ENOMEM;
  9501. + memset(ep_priv, 0, sizeof *ep_priv);
  9502. +
  9503. + ep->hcpriv = ep_priv;
  9504. + return 0;
  9505. +}
  9506. +
  9507. +static void ep_priv_free(struct usb_host_endpoint *ep) {
  9508. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  9509. + ASSERT(ep_priv);
  9510. + kfree(ep_priv);
  9511. + ep->hcpriv = NULL;
  9512. +}
  9513. +
  9514. +/* EPID handling functions, managing EP-list in Etrax through wrappers */
  9515. +/* ------------------------------------------------------------------- */
  9516. +
  9517. +/* Sets up a new EPID for an endpoint or returns existing if found */
  9518. +//static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
  9519. +// int mem_flags) {
  9520. +static int tc_setup_epid(struct urb *urb, int mem_flags)
  9521. +{
  9522. + int epid;
  9523. + char devnum, endpoint, out_traffic, slow;
  9524. + int maxlen;
  9525. + __u32 epid_data;
  9526. + struct usb_host_endpoint *ep = urb->ep;
  9527. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  9528. +
  9529. + DBFENTER;
  9530. +
  9531. + /* Check if a valid epid already is setup for this endpoint */
  9532. + if(ep_priv != NULL) {
  9533. + return ep_priv->epid;
  9534. + }
  9535. +
  9536. + /* We must find and initiate a new epid for this urb. */
  9537. + epid = tc_allocate_epid();
  9538. +
  9539. + if (epid == -1) {
  9540. + /* Failed to allocate a new epid. */
  9541. + DBFEXIT;
  9542. + return epid;
  9543. + }
  9544. +
  9545. + /* We now have a new epid to use. Claim it. */
  9546. + epid_state[epid].inuse = 1;
  9547. +
  9548. + /* Init private data for new endpoint */
  9549. + if(ep_priv_create(ep, mem_flags) != 0) {
  9550. + return -ENOMEM;
  9551. + }
  9552. + ep_priv = ep->hcpriv;
  9553. + ep_priv->epid = epid;
  9554. +
  9555. + devnum = usb_pipedevice(urb->pipe);
  9556. + endpoint = usb_pipeendpoint(urb->pipe);
  9557. + slow = (urb->dev->speed == USB_SPEED_LOW);
  9558. + maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  9559. +
  9560. + if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
  9561. + /* We want both IN and OUT control traffic to be put on the same
  9562. + EP/SB list. */
  9563. + out_traffic = 1;
  9564. + } else {
  9565. + out_traffic = usb_pipeout(urb->pipe);
  9566. + }
  9567. +
  9568. + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  9569. + epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
  9570. + /* FIXME: Change any to the actual port? */
  9571. + IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
  9572. + IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
  9573. + IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
  9574. + IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
  9575. + etrax_epid_iso_set(epid, epid_data);
  9576. + } else {
  9577. + epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
  9578. + IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
  9579. + /* FIXME: Change any to the actual port? */
  9580. + IO_STATE(R_USB_EPT_DATA, port, any) |
  9581. + IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
  9582. + IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
  9583. + IO_FIELD(R_USB_EPT_DATA, dev, devnum);
  9584. + etrax_epid_set(epid, epid_data);
  9585. + }
  9586. +
  9587. + epid_state[epid].out_traffic = out_traffic;
  9588. + epid_state[epid].type = usb_pipetype(urb->pipe);
  9589. +
  9590. + tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
  9591. + (unsigned int)ep, epid, devnum, endpoint, maxlen,
  9592. + str_type(urb->pipe), out_traffic ? "out" : "in",
  9593. + slow ? "low" : "full");
  9594. +
  9595. + /* Enable Isoc eof interrupt if we set up the first Isoc epid */
  9596. + if(usb_pipeisoc(urb->pipe)) {
  9597. + isoc_epid_counter++;
  9598. + if(isoc_epid_counter == 1) {
  9599. + isoc_warn("Enabled Isoc eof interrupt\n");
  9600. + *R_USB_IRQ_MASK_SET |= IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
  9601. + }
  9602. + }
  9603. +
  9604. + DBFEXIT;
  9605. + return epid;
  9606. +}
  9607. +
  9608. +static void tc_free_epid(struct usb_host_endpoint *ep) {
  9609. + unsigned long flags;
  9610. + struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  9611. + int epid;
  9612. + volatile int timeout = 10000;
  9613. +
  9614. + DBFENTER;
  9615. +
  9616. + if (ep_priv == NULL) {
  9617. + tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
  9618. + DBFEXIT;
  9619. + return;
  9620. + }
  9621. +
  9622. + epid = ep_priv->epid;
  9623. +
  9624. + /* Disable Isoc eof interrupt if we free the last Isoc epid */
  9625. + if(epid_isoc(epid)) {
  9626. + ASSERT(isoc_epid_counter > 0);
  9627. + isoc_epid_counter--;
  9628. + if(isoc_epid_counter == 0) {
  9629. + *R_USB_IRQ_MASK_SET &= ~IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
  9630. + isoc_warn("Disabled Isoc eof interrupt\n");
  9631. + }
  9632. + }
  9633. +
  9634. + /* Take lock manually instead of in epid_x_x wrappers,
  9635. + because we need to be polling here */
  9636. + spin_lock_irqsave(&etrax_epid_lock, flags);
  9637. +
  9638. + *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  9639. + nop();
  9640. + while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
  9641. + (timeout-- > 0));
  9642. + if(timeout == 0) {
  9643. + warn("Timeout while waiting for epid:%d to drop hold\n", epid);
  9644. + }
  9645. + /* This will, among other things, set the valid field to 0. */
  9646. + *R_USB_EPT_DATA = 0;
  9647. + spin_unlock_irqrestore(&etrax_epid_lock, flags);
  9648. +
  9649. + /* Free resource in software state info list */
  9650. + epid_state[epid].inuse = 0;
  9651. +
  9652. + /* Free private endpoint data */
  9653. + ep_priv_free(ep);
  9654. +
  9655. + DBFEXIT;
  9656. +}
  9657. +
  9658. +static int tc_allocate_epid(void) {
  9659. + int i;
  9660. + DBFENTER;
  9661. + for (i = 0; i < NBR_OF_EPIDS; i++) {
  9662. + if (!epid_inuse(i)) {
  9663. + DBFEXIT;
  9664. + return i;
  9665. + }
  9666. + }
  9667. +
  9668. + tc_warn("Found no free epids\n");
  9669. + DBFEXIT;
  9670. + return -1;
  9671. +}
  9672. +
  9673. +
  9674. +/* Wrappers around the list functions (include/linux/list.h). */
  9675. +/* ---------------------------------------------------------- */
  9676. +static inline int __urb_list_empty(int epid) {
  9677. + int retval;
  9678. + retval = list_empty(&urb_list[epid]);
  9679. + return retval;
  9680. +}
  9681. +
  9682. +/* Returns first urb for this epid, or NULL if list is empty. */
  9683. +static inline struct urb *urb_list_first(int epid) {
  9684. + unsigned long flags;
  9685. + struct urb *first_urb = 0;
  9686. + spin_lock_irqsave(&urb_list_lock, flags);
  9687. + if (!__urb_list_empty(epid)) {
  9688. + /* Get the first urb (i.e. head->next). */
  9689. + urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
  9690. + first_urb = urb_entry->urb;
  9691. + }
  9692. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9693. + return first_urb;
  9694. +}
  9695. +
  9696. +/* Adds an urb_entry last in the list for this epid. */
  9697. +static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
  9698. + unsigned long flags;
  9699. + urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
  9700. + ASSERT(urb_entry);
  9701. +
  9702. + urb_entry->urb = urb;
  9703. + spin_lock_irqsave(&urb_list_lock, flags);
  9704. + list_add_tail(&urb_entry->list, &urb_list[epid]);
  9705. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9706. +}
  9707. +
  9708. +/* Search through the list for an element that contains this urb. (The list
  9709. + is expected to be short and the one we are about to delete will often be
  9710. + the first in the list.)
  9711. + Should be protected by spin_locks in calling function */
  9712. +static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
  9713. + struct list_head *entry;
  9714. + struct list_head *tmp;
  9715. + urb_entry_t *urb_entry;
  9716. +
  9717. + list_for_each_safe(entry, tmp, &urb_list[epid]) {
  9718. + urb_entry = list_entry(entry, urb_entry_t, list);
  9719. + ASSERT(urb_entry);
  9720. + ASSERT(urb_entry->urb);
  9721. +
  9722. + if (urb_entry->urb == urb) {
  9723. + return urb_entry;
  9724. + }
  9725. + }
  9726. + return 0;
  9727. +}
  9728. +
  9729. +/* Same function as above but for global use. Protects list by spinlock */
  9730. +static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
  9731. + unsigned long flags;
  9732. + urb_entry_t *urb_entry;
  9733. + spin_lock_irqsave(&urb_list_lock, flags);
  9734. + urb_entry = __urb_list_entry(urb, epid);
  9735. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9736. + return (urb_entry);
  9737. +}
  9738. +
  9739. +/* Delete an urb from the list. */
  9740. +static inline void urb_list_del(struct urb *urb, int epid) {
  9741. + unsigned long flags;
  9742. + urb_entry_t *urb_entry;
  9743. +
  9744. + /* Delete entry and free. */
  9745. + spin_lock_irqsave(&urb_list_lock, flags);
  9746. + urb_entry = __urb_list_entry(urb, epid);
  9747. + ASSERT(urb_entry);
  9748. +
  9749. + list_del(&urb_entry->list);
  9750. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9751. + kfree(urb_entry);
  9752. +}
  9753. +
  9754. +/* Move an urb to the end of the list. */
  9755. +static inline void urb_list_move_last(struct urb *urb, int epid) {
  9756. + unsigned long flags;
  9757. + urb_entry_t *urb_entry;
  9758. +
  9759. + spin_lock_irqsave(&urb_list_lock, flags);
  9760. + urb_entry = __urb_list_entry(urb, epid);
  9761. + ASSERT(urb_entry);
  9762. +
  9763. + list_del(&urb_entry->list);
  9764. + list_add_tail(&urb_entry->list, &urb_list[epid]);
  9765. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9766. +}
  9767. +
  9768. +/* Get the next urb in the list. */
  9769. +static inline struct urb *urb_list_next(struct urb *urb, int epid) {
  9770. + unsigned long flags;
  9771. + urb_entry_t *urb_entry;
  9772. +
  9773. + spin_lock_irqsave(&urb_list_lock, flags);
  9774. + urb_entry = __urb_list_entry(urb, epid);
  9775. + ASSERT(urb_entry);
  9776. +
  9777. + if (urb_entry->list.next != &urb_list[epid]) {
  9778. + struct list_head *elem = urb_entry->list.next;
  9779. + urb_entry = list_entry(elem, urb_entry_t, list);
  9780. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9781. + return urb_entry->urb;
  9782. + } else {
  9783. + spin_unlock_irqrestore(&urb_list_lock, flags);
  9784. + return NULL;
  9785. + }
  9786. +}
  9787. +
  +/* Allocate and initialise an EP descriptor for @epid from usb_desc_cache.
  +   The descriptor is created with the enable bit set; its sub pointer is
  +   the physical address of @sb_desc, or 0 when @sb_desc is NULL.  Returns
  +   NULL on allocation failure.  The caller owns the returned descriptor
  +   (it comes from usb_desc_cache, so release with the matching cache
  +   free). */
  9788. +struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
  9789. + int mem_flags) {
  9790. + struct USB_EP_Desc *ep_desc;
  9791. + ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
  9792. + if(ep_desc == NULL)
  9793. + return NULL;
  9794. + memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
  9795. +
  9796. + ep_desc->hw_len = 0;
  9797. + ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
  9798. + IO_STATE(USB_EP_command, enable, yes));
  9799. + if(sb_desc == NULL) {
  9800. + ep_desc->sub = 0;
  9801. + } else {
  + /* Hardware reads the SB chain via physical addresses. */
  9802. + ep_desc->sub = virt_to_phys(sb_desc);
  9803. + }
  9804. + return ep_desc;
  9805. +}
  9806. +
  +/* Token/transfer types programmed into the SB command "tt" field. */
  9807. +#define TT_ZOUT 0
  9808. +#define TT_IN 1
  9809. +#define TT_OUT 2
  9810. +#define TT_SETUP 3
  9811. +
  +/* Shorthands for commonly OR'ed SB command flags (see set_sb_cmds()). */
  9812. +#define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
  9813. +#define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
  9814. +#define CMD_FULL IO_STATE(USB_SB_command, full, yes)
  9815. +
  9816. +/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
  9817. + SBs. Also used by create_sb_in() to avoid same allocation procedure at two
  9818. + places */
  +/* @sb_prev, when non-NULL, is chained to the new SB (its next pointer is
  +   set to the new SB's physical address).  Returns NULL on allocation
  +   failure; the caller owns the returned SB. */
  9819. +struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
  9820. + int datalen, int mem_flags) {
  9821. + struct USB_SB_Desc *sb_desc;
  9822. + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
  9823. + if(sb_desc == NULL)
  9824. + return NULL;
  9825. + memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
  9826. +
  9827. + sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
  9828. + IO_STATE(USB_SB_command, eot, yes);
  9829. +
  9830. + sb_desc->sw_len = datalen;
  9831. + if(data != NULL) {
  9832. + sb_desc->buf = virt_to_phys(data);
  9833. + } else {
  9834. + sb_desc->buf = 0;
  9835. + }
  9836. + if(sb_prev != NULL) {
  9837. + sb_prev->next = virt_to_phys(sb_desc);
  9838. + }
  9839. + return sb_desc;
  9840. +}
  9841. +
  9842. +/* Creates a copy of an existing SB by allocation space for it and copy
  9843. + settings */
  +/* Note: this is a shallow copy; the clone keeps the original's buf and
  +   next physical pointers.  Returns NULL on allocation failure. */
  9844. +struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
  9845. + struct USB_SB_Desc *sb_desc;
  9846. + sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
  9847. + if(sb_desc == NULL)
  9848. + return NULL;
  9849. +
  9850. + memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
  9851. + return sb_desc;
  9852. +}
  9853. +
  9854. +/* A specific create_sb function for creation of in SBs. This is due to
  9855. + that datalen in In SBs shows how many packets we are expecting. It also
  9856. + sets up the rem field to show if how many bytes we expect in last packet
  9857. + if it's not a full one */
  9858. +struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
  9859. + int maxlen, int mem_flags) {
  9860. + struct USB_SB_Desc *sb_desc;
  + /* sw_len for IN SBs = number of expected packets: ceil(datalen/maxlen),
  +    or 0 for a zero-length transfer. */
  9861. + sb_desc = create_sb(sb_prev, TT_IN, NULL,
  9862. + datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
  9863. + if(sb_desc == NULL)
  9864. + return NULL;
  + /* rem == 0 means the last packet is expected to be full-length. */
  9865. + sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
  9866. + return sb_desc;
  9867. +}
  9868. +
  +/* OR @flags (CMD_EOL / CMD_INTR / CMD_FULL, ...) into the SB command word. */
  9869. +void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
  9870. + sb_desc->command |= flags;
  9871. +}
  9872. +
  +/* Build the SB descriptor chain for @urb according to its pipe type and
  +   direction, and attach the chain ends to urb_priv->first_sb/last_sb.
  +   Returns 0 on success, -ENOMEM on allocation failure and -EPIPE for an
  +   unknown pipe type or a zero-packet Out Isoc URB.
  +   NOTE(review): on -ENOMEM part-way through a chain, SBs allocated so
  +   far are not freed here — presumably reclaimed via the urb_priv
  +   teardown path; verify against the caller. */
  9873. +int create_sb_for_urb(struct urb *urb, int mem_flags) {
  9874. + int is_out = !usb_pipein(urb->pipe);
  9875. + int type = usb_pipetype(urb->pipe);
  9876. + int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
  9877. + int buf_len = urb->transfer_buffer_length;
  9878. + void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
  9879. + struct USB_SB_Desc *sb_desc = NULL;
  9880. +
  9881. + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  9882. + ASSERT(urb_priv != NULL);
  9883. +
  9884. + switch(type) {
  9885. + case PIPE_CONTROL:
  9886. + /* Setup stage */
  9887. + sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
  9888. + if(sb_desc == NULL)
  9889. + return -ENOMEM;
  9890. + set_sb_cmds(sb_desc, CMD_FULL);
  9891. +
  9892. + /* Attach first SB to URB */
  9893. + urb_priv->first_sb = sb_desc;
  9894. +
  9895. + if (is_out) { /* Out Control URB */
  9896. + /* If this Control OUT transfer has an optional data stage we add
  9897. + an OUT token before the mandatory IN (status) token */
  9898. + if ((buf_len > 0) && buf) {
  9899. + sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
  9900. + if(sb_desc == NULL)
  9901. + return -ENOMEM;
  9902. + set_sb_cmds(sb_desc, CMD_FULL);
  9903. + }
  9904. +
  9905. + /* Status stage */
  9906. + /* The data length has to be exactly 1. This is due to a requirement
  9907. + of the USB specification that a host must be prepared to receive
  9908. + data in the status phase */
  9909. + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
  9910. + if(sb_desc == NULL)
  9911. + return -ENOMEM;
  9912. + } else { /* In control URB */
  9913. + /* Data stage */
  9914. + sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
  9915. + if(sb_desc == NULL)
  9916. + return -ENOMEM;
  9917. +
  9918. + /* Status stage */
  9919. + /* Read comment at zout_buffer declaration for an explanation to this. */
  9920. + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
  9921. + if(sb_desc == NULL)
  9922. + return -ENOMEM;
  9923. + /* Set descriptor interrupt flag for in URBs so we can finish URB after
  9924. + zout-packet has been sent */
  9925. + set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
  9926. + }
  9927. + /* Set end-of-list flag in last SB */
  9928. + set_sb_cmds(sb_desc, CMD_EOL);
  9929. + /* Attach last SB to URB */
  9930. + urb_priv->last_sb = sb_desc;
  9931. + break;
  9932. +
  9933. + case PIPE_BULK:
  9934. + if (is_out) { /* Out Bulk URB */
  9935. + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
  9936. + if(sb_desc == NULL)
  9937. + return -ENOMEM;
  9938. + /* The full field is set to yes, even if we don't actually check that
  9939. + this is a full-length transfer (i.e., that transfer_buffer_length %
  9940. + maxlen = 0).
  9941. + Setting full prevents the USB controller from sending an empty packet
  9942. + in that case. However, if URB_ZERO_PACKET was set we want that. */
  9943. + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
  9944. + set_sb_cmds(sb_desc, CMD_FULL);
  9945. + }
  9946. + } else { /* In Bulk URB */
  9947. + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
  9948. + if(sb_desc == NULL)
  9949. + return -ENOMEM;
  9950. + }
  9951. + /* Set end-of-list flag for last SB */
  9952. + set_sb_cmds(sb_desc, CMD_EOL);
  9953. +
  9954. + /* Attach SB to URB */
  9955. + urb_priv->first_sb = sb_desc;
  9956. + urb_priv->last_sb = sb_desc;
  9957. + break;
  9958. +
  9959. + case PIPE_INTERRUPT:
  9960. + if(is_out) { /* Out Intr URB */
  9961. + sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
  9962. + if(sb_desc == NULL)
  9963. + return -ENOMEM;
  9964. +
  9965. + /* The full field is set to yes, even if we don't actually check that
  9966. + this is a full-length transfer (i.e., that transfer_buffer_length %
  9967. + maxlen = 0).
  9968. + Setting full prevents the USB controller from sending an empty packet
  9969. + in that case. However, if URB_ZERO_PACKET was set we want that. */
  9970. + if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
  9971. + set_sb_cmds(sb_desc, CMD_FULL);
  9972. + }
  9973. + /* Only generate TX interrupt if it's a Out URB*/
  9974. + set_sb_cmds(sb_desc, CMD_INTR);
  9975. +
  9976. + } else { /* In Intr URB */
  9977. + sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
  9978. + if(sb_desc == NULL)
  9979. + return -ENOMEM;
  9980. + }
  9981. + /* Set end-of-list flag for last SB */
  9982. + set_sb_cmds(sb_desc, CMD_EOL);
  9983. +
  9984. + /* Attach SB to URB */
  9985. + urb_priv->first_sb = sb_desc;
  9986. + urb_priv->last_sb = sb_desc;
  9987. +
  9988. + break;
  9989. + case PIPE_ISOCHRONOUS:
  9990. + if(is_out) { /* Out Isoc URB */
  9991. + int i;
  9992. + if(urb->number_of_packets == 0) {
  9993. + tc_err("Can't create SBs for Isoc URB with zero packets\n");
  9994. + return -EPIPE;
  9995. + }
  9996. + /* Create one SB descriptor for each packet and link them together. */
  9997. + for(i = 0; i < urb->number_of_packets; i++) {
  9998. + if (urb->iso_frame_desc[i].length > 0) {
  9999. +
  10000. + sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
  10001. + urb->iso_frame_desc[i].offset,
  10002. + urb->iso_frame_desc[i].length, mem_flags);
  10003. + if(sb_desc == NULL)
  10004. + return -ENOMEM;
  10005. +
  10006. + /* Check if it's a full length packet */
  10007. + if (urb->iso_frame_desc[i].length ==
  10008. + usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
  10009. + set_sb_cmds(sb_desc, CMD_FULL);
  10010. + }
  10011. +
  10012. + } else { /* zero length packet */
  10013. + sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
  10014. + if(sb_desc == NULL)
  10015. + return -ENOMEM;
  10016. + set_sb_cmds(sb_desc, CMD_FULL);
  10017. + }
  10018. + /* Attach first SB descriptor to URB */
  10019. + if (i == 0) {
  10020. + urb_priv->first_sb = sb_desc;
  10021. + }
  10022. + }
  10023. + /* Set interrupt and end-of-list flags in last SB */
  10024. + set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
  10025. + /* Attach last SB descriptor to URB */
  10026. + urb_priv->last_sb = sb_desc;
  10027. + tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
  10028. + urb->number_of_packets, (unsigned int)urb);
  10029. + } else { /* In Isoc URB */
  10030. + /* Actual number of packets is not relevant for periodic in traffic as
  10031. + long as it is more than zero. Set to 1 always. */
  10032. + sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
  10033. + if(sb_desc == NULL)
  10034. + return -ENOMEM;
  10035. + /* Set end-of-list flags for SB */
  10036. + set_sb_cmds(sb_desc, CMD_EOL);
  10037. +
  10038. + /* Attach SB to URB */
  10039. + urb_priv->first_sb = sb_desc;
  10040. + urb_priv->last_sb = sb_desc;
  10041. + }
  10042. + break;
  10043. + default:
  10044. + tc_err("Unknown pipe-type\n");
  10045. + return -EPIPE;
  10046. + break;
  10047. + }
  10048. + return 0;
  10049. +}
  10050. +
  10051. +int init_intr_urb(struct urb *urb, int mem_flags) {
  10052. + struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
  10053. + struct USB_EP_Desc* ep_desc;
  10054. + int interval;
  10055. + int i;
  10056. + int ep_count;
  10057. +
  10058. + ASSERT(urb_priv != NULL);
  10059. + ASSERT(usb_pipeint(urb->pipe));
  10060. + /* We can't support interval longer than amount of eof descriptors in
  10061. + TxIntrEPList */
  10062. + if(urb->interval > MAX_INTR_INTERVAL) {
  10063. + tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
  10064. + MAX_INTR_INTERVAL);
  10065. + return -EINVAL;
  10066. + }
  10067. +
  10068. + /* We assume that the SB descriptors already have been setup */
  10069. + ASSERT(urb_priv->first_sb != NULL);
  10070. +
  10071. + /* Round of the interval to 2^n, it is obvious that this code favours
  10072. + smaller numbers, but that is actually a good thing */
  10073. + /* FIXME: The "rounding error" for larger intervals will be quite
  10074. + large. For in traffic this shouldn't be a problem since it will only
  10075. + mean that we "poll" more often. */
  10076. + interval = urb->interval;
  10077. + for (i = 0; interval; i++) {
  10078. + interval = interval >> 1;
  10079. + }
  10080. + urb_priv->interval = 1 << (i - 1);
  10081. +
  10082. + /* We can only have max interval for Out Interrupt due to that we can only
  10083. + handle one linked in EP for a certain epid in the Intr descr array at the
  10084. + time. The USB Controller in the Etrax 100LX continues to process Intr EPs
  10085. + so we have no way of knowing which one that caused the actual transfer if
  10086. + we have several linked in. */
  10087. + if(usb_pipeout(urb->pipe)) {
  10088. + urb_priv->interval = MAX_INTR_INTERVAL;
  10089. + }
  10090. +
  10091. + /* Calculate amount of EPs needed */
  10092. + ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
  10093. +
  10094. + for(i = 0; i < ep_count; i++) {
  10095. + ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
  10096. + if(ep_desc == NULL) {
  10097. + /* Free any descriptors that we may have allocated before failure */
  10098. + while(i > 0) {
  10099. + i--;
  10100. + kfree(urb_priv->intr_ep_pool[i]);
  10101. + }
  10102. + return -ENOMEM;
  10103. + }
  10104. + urb_priv->intr_ep_pool[i] = ep_desc;
  10105. + }
  10106. + urb_priv->intr_ep_pool_length = ep_count;
  10107. + return 0;
  10108. +}
  10109. +
  10110. +/* DMA RX/TX functions */
  10111. +/* ----------------------- */
  10112. +
  +/* Build the circular RX descriptor ring (RxDescList over RxBuf), point
  +   DMA channel 9 at its head and start it.  The last descriptor gets the
  +   eol flag and links back to the first. */
  10113. +static void tc_dma_init_rx_list(void) {
  10114. + int i;
  10115. +
  10116. + /* Setup descriptor list except last one */
  10117. + for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
  10118. + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
  10119. + RxDescList[i].command = 0;
  10120. + RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
  10121. + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
  10122. + RxDescList[i].hw_len = 0;
  10123. + RxDescList[i].status = 0;
  10124. +
  10125. + /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
  10126. + USB_IN_Desc for the relevant fields.) */
  10127. + prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
  10128. +
  10129. + }
  10130. + /* Special handling of last descriptor */
  10131. + RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
  10132. + RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
  10133. + RxDescList[i].next = virt_to_phys(&RxDescList[0]);
  10134. + RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
  10135. + RxDescList[i].hw_len = 0;
  10136. + RxDescList[i].status = 0;
  10137. +
  10138. + /* Setup list pointers that show progress in list */
  10139. + myNextRxDesc = &RxDescList[0];
  10140. + myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
  10141. +
  + /* Flush before handing the ring to the DMA engine, so it sees the
  +    descriptors just written. */
  10142. + flush_etrax_cache();
  10143. + /* Point DMA to first descriptor in list and start it */
  10144. + *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
  10145. + *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
  10146. +}
  10147. +
  10148. +
  +/* Build the circular TX bulk EP list (one EP per epid) plus, for each
  +   epid, a two-entry dummy EP list used to detect when the DMA channel is
  +   about to go idle.  Points DMA8 sub-channel 0 at the list but does not
  +   start it. */
  10149. +static void tc_dma_init_tx_bulk_list(void) {
  10150. + int i;
  10151. + volatile struct USB_EP_Desc *epDescr;
  10152. +
  10153. + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
  10154. + epDescr = &(TxBulkEPList[i]);
  10155. + CHECK_ALIGN(epDescr);
  10156. + epDescr->hw_len = 0;
  10157. + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
  10158. + epDescr->sub = 0;
  10159. + epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
  10160. +
  10161. + /* Initiate two EPs, disabled and with the eol flag set. No need for any
  10162. + preserved epid. */
  10163. +
  10164. + /* The first one has the intr flag set so we get an interrupt when the DMA
  10165. + channel is about to become disabled. */
  10166. + CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
  10167. + TxBulkDummyEPList[i][0].hw_len = 0;
  10168. + TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
  10169. + IO_STATE(USB_EP_command, eol, yes) |
  10170. + IO_STATE(USB_EP_command, intr, yes));
  10171. + TxBulkDummyEPList[i][0].sub = 0;
  10172. + TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
  10173. +
  10174. + /* The second one. */
  10175. + CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
  10176. + TxBulkDummyEPList[i][1].hw_len = 0;
  10177. + TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
  10178. + IO_STATE(USB_EP_command, eol, yes));
  10179. + TxBulkDummyEPList[i][1].sub = 0;
  10180. + /* The last dummy's next pointer is the same as the current EP's next pointer. */
  10181. + TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
  10182. + }
  10183. +
  10184. + /* Special handling of last descr in list, make list circular */
  10185. + epDescr = &TxBulkEPList[i];
  10186. + CHECK_ALIGN(epDescr);
  10187. + epDescr->hw_len = 0;
  10188. + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
  10189. + IO_FIELD(USB_EP_command, epid, i);
  10190. + epDescr->sub = 0;
  10191. + epDescr->next = virt_to_phys(&TxBulkEPList[0]);
  10192. +
  10193. + /* Init DMA sub-channel pointers to last item in each list */
  10194. + *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
  10195. + /* No point in starting the bulk channel yet.
  10196. + *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
  10197. +}
  10198. +
  +/* Build the circular TX control EP list (one EP per epid) and point DMA8
  +   sub-channel 1 at it, without starting the channel. */
  10199. +static void tc_dma_init_tx_ctrl_list(void) {
  10200. + int i;
  10201. + volatile struct USB_EP_Desc *epDescr;
  10202. +
  10203. + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
  10204. + epDescr = &(TxCtrlEPList[i]);
  10205. + CHECK_ALIGN(epDescr);
  10206. + epDescr->hw_len = 0;
  10207. + epDescr->command = IO_FIELD(USB_EP_command, epid, i);
  10208. + epDescr->sub = 0;
  10209. + epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
  10210. + }
  10211. + /* Special handling of last descr in list, make list circular */
  10212. + epDescr = &TxCtrlEPList[i];
  10213. + CHECK_ALIGN(epDescr);
  10214. + epDescr->hw_len = 0;
  10215. + epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
  10216. + IO_FIELD(USB_EP_command, epid, i);
  10217. + epDescr->sub = 0;
  10218. + epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
  10219. +
  10220. + /* Init DMA sub-channel pointers to last item in each list */
  10221. + *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
  10222. + /* No point in starting the ctrl channel yet.
  10223. + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start); */
  10224. +}
  10225. +
  10226. +
  +/* Build the circular interrupt EP list: MAX_INTR_INTERVAL enabled EP
  +   descriptors (one per 1ms frame, marked eof), each pointing at a shared
  +   zero-length-OUT SB, then connect DMA8 sub-channel 2 to the head.  Intr
  +   URB EPs are later spliced into this ring by tc_dma_link_intr_urb(). */
  10227. +static void tc_dma_init_tx_intr_list(void) {
  10228. + int i;
  10229. +
  10230. + TxIntrSB_zout.sw_len = 1;
  10231. + TxIntrSB_zout.next = 0;
  10232. + TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
  10233. + TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
  10234. + IO_STATE(USB_SB_command, tt, zout) |
  10235. + IO_STATE(USB_SB_command, full, yes) |
  10236. + IO_STATE(USB_SB_command, eot, yes) |
  10237. + IO_STATE(USB_SB_command, eol, yes));
  10238. +
  10239. + for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
  10240. + CHECK_ALIGN(&TxIntrEPList[i]);
  10241. + TxIntrEPList[i].hw_len = 0;
  10242. + TxIntrEPList[i].command =
  10243. + (IO_STATE(USB_EP_command, eof, yes) |
  10244. + IO_STATE(USB_EP_command, enable, yes) |
  10245. + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  10246. + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
  10247. + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
  10248. + }
  10249. +
  10250. + /* Special handling of last descr in list, make list circular */
  10251. + CHECK_ALIGN(&TxIntrEPList[i]);
  10252. + TxIntrEPList[i].hw_len = 0;
  10253. + TxIntrEPList[i].command =
  10254. + (IO_STATE(USB_EP_command, eof, yes) |
  10255. + IO_STATE(USB_EP_command, eol, yes) |
  10256. + IO_STATE(USB_EP_command, enable, yes) |
  10257. + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  10258. + TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
  10259. + TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
  10260. +
  10261. + intr_dbg("Initiated Intr EP descriptor list\n");
  10262. +
  10263. +
  10264. + /* Connect DMA 8 sub-channel 2 to first in list */
  10265. + *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
  10266. +}
  10267. +
  +/* Build the circular isochronous EP list (one disabled EP per epid plus
  +   an enabled dummy used to get the eof interrupt), then point DMA8
  +   sub-channel 3 at it and start the channel. */
  10268. +static void tc_dma_init_tx_isoc_list(void) {
  10269. + int i;
  10270. +
  10271. + DBFENTER;
  10272. +
  10273. + /* Read comment at zout_buffer declaration for an explanation to this. */
  10274. + TxIsocSB_zout.sw_len = 1;
  10275. + TxIsocSB_zout.next = 0;
  10276. + TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
  10277. + TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
  10278. + IO_STATE(USB_SB_command, tt, zout) |
  10279. + IO_STATE(USB_SB_command, full, yes) |
  10280. + IO_STATE(USB_SB_command, eot, yes) |
  10281. + IO_STATE(USB_SB_command, eol, yes));
  10282. +
  10283. + /* The last isochronous EP descriptor is a dummy. */
  10284. + for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
  10285. + CHECK_ALIGN(&TxIsocEPList[i]);
  10286. + TxIsocEPList[i].hw_len = 0;
  10287. + TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
  10288. + TxIsocEPList[i].sub = 0;
  10289. + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
  10290. + }
  10291. +
  10292. + CHECK_ALIGN(&TxIsocEPList[i]);
  10293. + TxIsocEPList[i].hw_len = 0;
  10294. +
  10295. + /* Must enable the last EP descr to get eof interrupt. */
  10296. + TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
  10297. + IO_STATE(USB_EP_command, eof, yes) |
  10298. + IO_STATE(USB_EP_command, eol, yes) |
  10299. + IO_FIELD(USB_EP_command, epid, INVALID_EPID));
  10300. + TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
  10301. + TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
  10302. +
  10303. + *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
  10304. + *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
  10305. +}
  10306. +
  10307. +static int tc_dma_init(struct usb_hcd *hcd) {
  10308. + tc_dma_init_rx_list();
  10309. + tc_dma_init_tx_bulk_list();
  10310. + tc_dma_init_tx_ctrl_list();
  10311. + tc_dma_init_tx_intr_list();
  10312. + tc_dma_init_tx_isoc_list();
  10313. +
  10314. + if (cris_request_dma(USB_TX_DMA_NBR,
  10315. + "ETRAX 100LX built-in USB (Tx)",
  10316. + DMA_VERBOSE_ON_ERROR,
  10317. + dma_usb)) {
  10318. + err("Could not allocate DMA ch 8 for USB");
  10319. + return -EBUSY;
  10320. + }
  10321. +
  10322. + if (cris_request_dma(USB_RX_DMA_NBR,
  10323. + "ETRAX 100LX built-in USB (Rx)",
  10324. + DMA_VERBOSE_ON_ERROR,
  10325. + dma_usb)) {
  10326. + err("Could not allocate DMA ch 9 for USB");
  10327. + return -EBUSY;
  10328. + }
  10329. +
  10330. + *R_IRQ_MASK2_SET =
  10331. + /* Note that these interrupts are not used. */
  10332. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
  10333. + /* Sub channel 1 (ctrl) descr. interrupts are used. */
  10334. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
  10335. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
  10336. + /* Sub channel 3 (isoc) descr. interrupts are used. */
  10337. + IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
  10338. +
  10339. + /* Note that the dma9_descr interrupt is not used. */
  10340. + *R_IRQ_MASK2_SET =
  10341. + IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
  10342. + IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
  10343. +
  10344. + if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
  10345. + "ETRAX 100LX built-in USB (Rx)", hcd)) {
  10346. + err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
  10347. + return -EBUSY;
  10348. + }
  10349. +
  10350. + if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
  10351. + "ETRAX 100LX built-in USB (Tx)", hcd)) {
  10352. + err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
  10353. + return -EBUSY;
  10354. + }
  10355. +
  10356. + return 0;
  10357. +}
  10358. +
  +/* Release the IRQs and DMA channels claimed by tc_dma_init(). */
  10359. +static void tc_dma_destroy(void) {
  + /* NOTE(review): tc_dma_init() registers these handlers with the hcd
  +    pointer as dev_id, but they are freed here with NULL.  free_irq()
  +    matches on dev_id, so this looks like it would fail to find the
  +    registered handler — verify against tc_dma_init(). */
  10360. + free_irq(ETRAX_USB_RX_IRQ, NULL);
  10361. + free_irq(ETRAX_USB_TX_IRQ, NULL);
  10362. +
  10363. + cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
  10364. + cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
  10365. +
  10366. +}
  10367. +
  10368. +static void tc_dma_link_intr_urb(struct urb *urb);
  10369. +
  10370. +/* Handle processing of Bulk, Ctrl and Intr queues */
  10371. +static void tc_dma_process_queue(int epid) {
  10372. + struct urb *urb;
  10373. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10374. + unsigned long flags;
  10375. + char toggle;
  10376. +
  10377. + if(epid_state[epid].disabled) {
  10378. + /* Don't process any URBs on a disabled endpoint */
  10379. + return;
  10380. + }
  10381. +
  10382. + /* Do not disturb us while fiddling with EPs and epids */
  10383. + local_irq_save(flags);
  10384. +
  10385. + /* For bulk, Ctrl and Intr can we only have one URB active at a time for
  10386. + a specific EP. */
  10387. + if(activeUrbList[epid] != NULL) {
  10388. + /* An URB is already active on EP, skip checking queue */
  10389. + local_irq_restore(flags);
  10390. + return;
  10391. + }
  10392. +
  10393. + urb = urb_list_first(epid);
  10394. + if(urb == NULL) {
  10395. + /* No URB waiting in EP queue. Nothing do to */
  10396. + local_irq_restore(flags);
  10397. + return;
  10398. + }
  10399. +
  10400. + urb_priv = urb->hcpriv;
  10401. + ASSERT(urb_priv != NULL);
  10402. + ASSERT(urb_priv->urb_state == NOT_STARTED);
  10403. + ASSERT(!usb_pipeisoc(urb->pipe));
  10404. +
  10405. + /* Remove this URB from the queue and move it to active */
  10406. + activeUrbList[epid] = urb;
  10407. + urb_list_del(urb, epid);
  10408. +
  10409. + urb_priv->urb_state = STARTED;
  10410. +
  10411. + /* Reset error counters (regardless of which direction this traffic is). */
  10412. + etrax_epid_clear_error(epid);
  10413. +
  10414. + /* Special handling of Intr EP lists */
  10415. + if(usb_pipeint(urb->pipe)) {
  10416. + tc_dma_link_intr_urb(urb);
  10417. + local_irq_restore(flags);
  10418. + return;
  10419. + }
  10420. +
  10421. + /* Software must preset the toggle bits for Bulk and Ctrl */
  10422. + if(usb_pipecontrol(urb->pipe)) {
  10423. + /* Toggle bits are initialized only during setup transaction in a
  10424. + CTRL transfer */
  10425. + etrax_epid_set_toggle(epid, 0, 0);
  10426. + etrax_epid_set_toggle(epid, 1, 0);
  10427. + } else {
  10428. + toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
  10429. + usb_pipeout(urb->pipe));
  10430. + etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
  10431. + }
  10432. +
  10433. + tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
  10434. + (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
  10435. + sblist_to_str(urb_priv->first_sb));
  10436. +
  10437. + /* We start the DMA sub channel without checking if it's running or not,
  10438. + because:
  10439. + 1) If it's already running, issuing the start command is a nop.
  10440. + 2) We avoid a test-and-set race condition. */
  10441. + switch(usb_pipetype(urb->pipe)) {
  10442. + case PIPE_BULK:
  10443. + /* Assert that the EP descriptor is disabled. */
  10444. + ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
  10445. +
  10446. + /* Set up and enable the EP descriptor. */
  10447. + TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
  10448. + TxBulkEPList[epid].hw_len = 0;
  10449. + TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  10450. +
  10451. + /* Check if the dummy list is already with us (if several urbs were queued). */
  10452. + if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
  10453. + tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
  10454. + (unsigned long)urb, epid);
  10455. +
  10456. + /* We don't need to check if the DMA is at this EP or not before changing the
  10457. + next pointer, since we will do it in one 32-bit write (EP descriptors are
  10458. + 32-bit aligned). */
  10459. + TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
  10460. + }
  10461. +
  10462. + restart_dma8_sub0();
  10463. +
  10464. + /* Update/restart the bulk start timer since we just started the channel.*/
  10465. + mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
  10466. + /* Update/restart the bulk eot timer since we just inserted traffic. */
  10467. + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
  10468. + break;
  10469. + case PIPE_CONTROL:
  10470. + /* Assert that the EP descriptor is disabled. */
  10471. + ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
  10472. +
  10473. + /* Set up and enable the EP descriptor. */
  10474. + TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
  10475. + TxCtrlEPList[epid].hw_len = 0;
  10476. + TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
  10477. +
  10478. + *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
  10479. + break;
  10480. + }
  10481. + local_irq_restore(flags);
  10482. +}
  10483. +
  +/* Splice the URB's pre-allocated EP descriptors (intr_ep_pool, set up by
  +   init_intr_urb()) into the circular TxIntrEPList: one EP at the ring
  +   head for Out Intr, or one EP every urb_priv->interval frames for In
  +   Intr.  Then (re)start DMA8 sub-channel 2.  Caller runs with IRQs off
  +   (see tc_dma_process_queue()). */
  10484. +static void tc_dma_link_intr_urb(struct urb *urb) {
  10485. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10486. + volatile struct USB_EP_Desc *tmp_ep;
  10487. + struct USB_EP_Desc *ep_desc;
  10488. + int i = 0, epid;
  10489. + int pool_idx = 0;
  10490. +
  10491. + ASSERT(urb_priv != NULL);
  10492. + epid = urb_priv->epid;
  10493. + ASSERT(urb_priv->interval > 0);
  10494. + ASSERT(urb_priv->intr_ep_pool_length > 0);
  10495. +
  10496. + tmp_ep = &TxIntrEPList[0];
  10497. +
  10498. + /* Only insert one EP descriptor in list for Out Intr URBs.
  10499. + We can only handle Out Intr with interval of 128ms because
  10500. + it's not possible to insert several Out Intr EPs because they
  10501. + are not consumed by the DMA. */
  10502. + if(usb_pipeout(urb->pipe)) {
  10503. + ep_desc = urb_priv->intr_ep_pool[0];
  10504. + ASSERT(ep_desc);
  + /* Link the new EP in after the ring head: new->next takes over the
  +    head's old next, then the head points at the new EP. */
  10505. + ep_desc->next = tmp_ep->next;
  10506. + tmp_ep->next = virt_to_phys(ep_desc);
  10507. + i++;
  10508. + } else {
  10509. + /* Loop through Intr EP descriptor list and insert EP for URB at
  10510. + specified interval */
  10511. + do {
  10512. + /* Each EP descriptor with eof flag sat signals a new frame */
  10513. + if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
  10514. + /* Insert a EP from URBs EP pool at correct interval */
  10515. + if ((i % urb_priv->interval) == 0) {
  10516. + ep_desc = urb_priv->intr_ep_pool[pool_idx];
  10517. + ASSERT(ep_desc);
  10518. + ep_desc->next = tmp_ep->next;
  10519. + tmp_ep->next = virt_to_phys(ep_desc);
  10520. + pool_idx++;
  10521. + ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
  10522. + }
  10523. + i++;
  10524. + }
  10525. + tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
  10526. + } while(tmp_ep != &TxIntrEPList[0]);
  10527. + }
  10528. +
  10529. + intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
  10530. + sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
  10531. +
  10532. + /* We start the DMA sub channel without checking if it's running or not,
  10533. + because:
  10534. + 1) If it's already running, issuing the start command is a nop.
  10535. + 2) We avoid a test-and-set race condition. */
  10536. + *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
  10537. +}
  10538. +
 /* hinko ignore usb_pipeisoc */
#if 0
/* Queue an isochronous URB on the Tx isoc DMA sub-channel (DMA8 sub3).
   Compiled out in this build ("hinko ignore usb_pipeisoc").

   If the epid is idle the URB becomes the active one and its pre-allocated
   SB list is attached to the EP descriptor; otherwise the URB's SBs are
   appended to the end of the running SB list (Out only) and the EP is
   re-armed if the DMA already ran off the end.  Runs with local IRQs
   disabled because it manipulates live DMA descriptors. */
static void tc_dma_process_isoc_urb(struct urb *urb) {
  unsigned long flags;
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;

  /* Do not disturb us while fiddling with EPs and epids */
  local_irq_save(flags);

  ASSERT(urb_priv);
  ASSERT(urb_priv->first_sb);
  epid = urb_priv->epid;

  if(activeUrbList[epid] == NULL) {
    /* EP is idle, so make this URB active */
    activeUrbList[epid] = urb;
    urb_list_del(urb, epid);
    /* An idle isoc EP must have no sub list and must be disabled. */
    ASSERT(TxIsocEPList[epid].sub == 0);
    ASSERT(!(TxIsocEPList[epid].command &
             IO_STATE(USB_EP_command, enable, yes)));

    /* Differentiate between In and Out Isoc. Because In SBs are not consumed*/
    if(usb_pipein(urb->pipe)) {
      /* Each EP for In Isoc will have only one SB descriptor, setup when
         submitting the first active urb. We do it here by copying from URBs
         pre-allocated SB. */
      memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
             sizeof(TxIsocSBList[epid]));
      TxIsocEPList[epid].hw_len = 0;
      TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
    } else {
      /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
      TxIsocEPList[epid].hw_len = 0;
      TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);

      isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
               " last_sb::0x%x\n",
               (unsigned int)urb, urb_priv->urb_num, epid,
               (unsigned int)(urb_priv->first_sb),
               (unsigned int)(urb_priv->last_sb));
    }

    if (urb->transfer_flags & URB_ISO_ASAP) {
      /* The isoc transfer should be started as soon as possible. The
         start_frame field is a return value if URB_ISO_ASAP was set. Comparing
         R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
         token is sent 2 frames later. I'm not sure how this affects usage of
         the start_frame field by the device driver, or how it affects things
         when USB_ISO_ASAP is not set, so therefore there's no compensation for
         the 2 frame "lag" here. */
      urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
      TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
      urb_priv->urb_state = STARTED;
      isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
               urb->start_frame);
    } else {
      /* Not started yet. */
      urb_priv->urb_state = NOT_STARTED;
      isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
                (unsigned int)urb);
    }

  } else {
    /* An URB is already active on the EP. Leave URB in queue and let
       finish_isoc_urb process it after current active URB */
    ASSERT(TxIsocEPList[epid].sub != 0);

    if(usb_pipein(urb->pipe)) {
      /* Because there already is a active In URB on this epid we do nothing
         and the finish_isoc_urb() function will handle switching to next URB*/

    } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
      struct USB_SB_Desc *temp_sb_desc;

      /* Set state STARTED to all Out Isoc URBs added to SB list because we
         don't know how many of them that are finished before descr interrupt*/
      urb_priv->urb_state = STARTED;

      /* Find end of current SB list by looking for SB with eol flag sat */
      temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
      while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
             IO_STATE(USB_SB_command, eol, yes)) {
        ASSERT(temp_sb_desc->next);
        temp_sb_desc = phys_to_virt(temp_sb_desc->next);
      }

      isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
               " sub:0x%x eol:0x%x\n",
               (unsigned int)urb, urb_priv->urb_num,
               (unsigned int)(urb_priv->first_sb),
               (unsigned int)(urb_priv->last_sb), epid,
               (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
               (unsigned int)temp_sb_desc);

      /* Next pointer must be set before eol is removed. */
      temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
      /* Clear the previous end of list flag since there is a new in the
         added SB descriptor list. */
      temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);

      if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
        __u32 epid_data;
        /* 8.8.5 in Designer's Reference says we should check for and correct
           any errors in the EP here. That should not be necessary if
           epid_attn is handled correctly, so we assume all is ok. */
        epid_data = etrax_epid_iso_get(epid);
        if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
            IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
          isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
                   " URB:0x%x[%d]\n",
                   IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
                   (unsigned int)urb, urb_priv->urb_num);
        }

        /* The SB list was exhausted. */
        if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
          /* The new sublist did not get processed before the EP was
             disabled. Setup the EP again. */

          if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
            isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
                     ", restarting from this URBs SB:0x%x\n",
                     epid, (unsigned int)temp_sb_desc,
                     (unsigned int)(urb_priv->first_sb));
            TxIsocEPList[epid].hw_len = 0;
            TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
            urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
            /* Enable the EP again so data gets processed this time */
            TxIsocEPList[epid].command |=
              IO_STATE(USB_EP_command, enable, yes);

          } else {
            /* The EP has been disabled but not at end this URB (god knows
               where). This should generate an epid_attn so we should not be
               here */
            isoc_warn("EP was disabled on sb:0x%x before SB list for"
                      " URB:0x%x[%d] got processed\n",
                      (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
                      (unsigned int)urb, urb_priv->urb_num);
          }
        } else {
          /* This might happend if we are slow on this function and isn't
             an error. */
          isoc_dbg("EP was disabled and finished with SBs from appended"
                   " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
        }
      }
    }
  }

  /* Start the DMA sub channel */
  *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);

  local_irq_restore(flags);
}
#endif
  10696. +
  10697. +static void tc_dma_unlink_intr_urb(struct urb *urb) {
  10698. + struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  10699. + volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  10700. + volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
  10701. + volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
  10702. + volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
  10703. + the list. */
  10704. + int count = 0;
  10705. + volatile int timeout = 10000;
  10706. + int epid;
  10707. +
  10708. + /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
  10709. + List". */
  10710. + ASSERT(urb_priv);
  10711. + ASSERT(urb_priv->intr_ep_pool_length > 0);
  10712. + epid = urb_priv->epid;
  10713. +
  10714. + /* First disable all Intr EPs belonging to epid for this URB */
  10715. + first_ep = &TxIntrEPList[0];
  10716. + curr_ep = first_ep;
  10717. + do {
  10718. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  10719. + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
  10720. + /* Disable EP */
  10721. + next_ep->command &= ~IO_MASK(USB_EP_command, enable);
  10722. + }
  10723. + curr_ep = phys_to_virt(curr_ep->next);
  10724. + } while (curr_ep != first_ep);
  10725. +
  10726. +
  10727. + /* Now unlink all EPs belonging to this epid from Descr list */
  10728. + first_ep = &TxIntrEPList[0];
  10729. + curr_ep = first_ep;
  10730. + do {
  10731. + next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
  10732. + if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
  10733. + /* This is the one we should unlink. */
  10734. + unlink_ep = next_ep;
  10735. +
  10736. + /* Actually unlink the EP from the DMA list. */
  10737. + curr_ep->next = unlink_ep->next;
  10738. +
  10739. + /* Wait until the DMA is no longer at this descriptor. */
  10740. + while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
  10741. + (timeout-- > 0));
  10742. + if(timeout == 0) {
  10743. + warn("Timeout while waiting for DMA-TX-Intr to leave unlink EP\n");
  10744. + }
  10745. +
  10746. + count++;
  10747. + }
  10748. + curr_ep = phys_to_virt(curr_ep->next);
  10749. + } while (curr_ep != first_ep);
  10750. +
  10751. + if(count != urb_priv->intr_ep_pool_length) {
  10752. + intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
  10753. + urb_priv->intr_ep_pool_length, (unsigned int)urb,
  10754. + urb_priv->urb_num);
  10755. + } else {
  10756. + intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
  10757. + urb_priv->intr_ep_pool_length, (unsigned int)urb);
  10758. + }
  10759. +}
  10760. +
/* Scan all epids for Out Bulk EP descriptors that the DMA has finished
   (i.e. the EP is disabled but still has a valid sub pointer) and complete
   the corresponding active URBs.  Called both from the Tx DMA interrupt and
   from the bulk eot timer; `timer` only selects the log severity.  Runs with
   local IRQs disabled to protect the Tx EP list. */
static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
                                         int timer) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  __u32 epid_data;

  /* Protect TxEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    /* A finished EP descriptor is disabled and has a valid sub pointer */
    if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
        (TxBulkEPList[epid].sub != 0)) {

      /* Get the active URB for this epid */
      urb = activeUrbList[epid];
      /* Sanity checks */
      ASSERT(urb);
      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      /* Only handle finished out Bulk EPs here,
         and let RX interrupt take care of the rest */
      if(!epid_out_traffic(epid)) {
        continue;
      }

      if(timer) {
        tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
                epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
                urb_priv->urb_num);
      } else {
        tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
               epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
               urb_priv->urb_num);
      }

      if(urb_priv->urb_state == UNLINK) {
        /* This Bulk URB is requested to be unlinked, that means that the EP
           has been disabled and we might not have sent all data */
        tc_finish_urb(hcd, urb, urb->status);
        continue;
      }

      ASSERT(urb_priv->urb_state == STARTED);
      if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
        tc_err("Endpoint got disabled before reaching last sb\n");
      }

      epid_data = etrax_epid_get(epid);
      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
          IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
        /* This means that the endpoint has no error, is disabled
           and had inserted traffic, i.e. transfer successfully completed. */
        tc_finish_urb(hcd, urb, 0);
      } else {
        /* Shouldn't happen. We expect errors to be caught by epid
           attention. */
        tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
               epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
      }
    } else {
      /* NOTE(review): this branch is reached for ANY epid that is not
         "disabled with valid sub" — idle and still-running epids included —
         so the "In Bulk" wording below is only accurate in some cases. */
      tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
    }
  }

  local_irq_restore(flags);
}
  10831. +
/* Scan all epids (except the dummy) for In Ctrl EP descriptors whose zout
   stage has finished (EP disabled with valid sub pointer).  The URB is only
   completed here if the RX interrupt has already delivered the data stage
   (ctrl_rx_done); otherwise ctrl_zout_done is flagged and the RX interrupt
   finishes the URB.  Runs with local IRQs disabled to protect the EP list. */
static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  __u32 epid_data;

  /* Protect TxEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if(epid == DUMMY_EPID)
      continue;

    /* A finished EP descriptor is disabled and has a valid sub pointer */
    if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
        (TxCtrlEPList[epid].sub != 0)) {

      /* Get the active URB for this epid */
      urb = activeUrbList[epid];

      if(urb == NULL) {
        tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
        continue;
      }

      /* Sanity checks: only In Ctrl transfers are expected to reach this
         path (Out Ctrl completes elsewhere — the assert documents that). */
      ASSERT(usb_pipein(urb->pipe));
      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);
      if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
        tc_err("Endpoint got disabled before reaching last sb\n");
      }

      epid_data = etrax_epid_get(epid);
      if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
          IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
        /* This means that the endpoint has no error, is disabled
           and had inserted traffic, i.e. transfer successfully completed. */

        /* Check if RX-interrupt for In Ctrl has been processed before
           finishing the URB */
        if(urb_priv->ctrl_rx_done) {
          tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
                 (unsigned int)urb, urb_priv->urb_num);
          tc_finish_urb(hcd, urb, 0);
        } else {
          /* If we get zout descriptor interrupt before RX was done for a
             In Ctrl transfer, then we flag that and it will be finished
             in the RX-Interrupt */
          urb_priv->ctrl_zout_done = 1;
          tc_dbg("Got zout descr interrupt before RX interrupt\n");
        }
      } else {
        /* Shouldn't happen. We expect errors to be caught by epid
           attention. */
        tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
        __dump_ep_desc(&(TxCtrlEPList[epid]));
        __dump_ept_data(epid);
      }
    }
  }
  local_irq_restore(flags);
}
  10896. +
 /* hinko ignore usb_pipeisoc */
#if 0
/* This function goes through all epids that are setup for Out Isoc transfers
   and marks (isoc_out_done) all queued URBs that the DMA has finished
   transfer for.
   No URB completion is done here to make interrupt routine return quickly.
   URBs are completed later with help of complete_isoc_bottom_half() that
   becomes scheduled when this function is finished.
   (Compiled out in this build — "hinko ignore usb_pipeisoc".) */
static void check_finished_isoc_tx_epids(void) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  struct USB_SB_Desc* sb_desc;
  int epid_done;

  /* Protect TxIsocEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
        !epid_out_traffic(epid)) {
      /* Nothing here to see. */
      continue;
    }
    ASSERT(epid_inuse(epid));
    ASSERT(epid_isoc(epid));

    sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
    /* Find the last descriptor of the currently active URB for this ep.
       This is the first descriptor in the sub list marked for a descriptor
       interrupt. */
    while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
      sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
    }
    ASSERT(sb_desc);

    isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
             epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
             (unsigned int)sb_desc);

    urb = activeUrbList[epid];
    if(urb == NULL) {
      isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
      continue;
    }

    /* Walk the URB queue for this epid: everything queued before the SB that
       raised the interrupt has been transmitted. */
    epid_done = 0;
    while(urb && !epid_done) {
      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
      ASSERT(usb_pipeout(urb->pipe));

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);
      ASSERT(urb_priv->urb_state == STARTED ||
             urb_priv->urb_state == UNLINK);

      if (sb_desc != urb_priv->last_sb) {
        /* This urb has been sent. */
        urb_priv->isoc_out_done = 1;

      } else { /* Found URB that has last_sb as the interrupt reason */

        /* Check if EP has been disabled, meaning that all transfers are done*/
        if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
          ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
                 IO_STATE(USB_SB_command, eol, yes));
          ASSERT(sb_desc->next == 0);
          urb_priv->isoc_out_done = 1;
        } else {
          isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
                   (unsigned int)urb, urb_priv->urb_num);
        }
        /* Stop looking any further in queue */
        epid_done = 1;
      }

      if (!epid_done) {
        if(urb == activeUrbList[epid]) {
          urb = urb_list_first(epid);
        } else {
          urb = urb_list_next(urb, epid);
        }
      }
    } /* END: while(urb && !epid_done) */
  }

  local_irq_restore(flags);
}
  10987. +
  10988. +
/* This is where the Out Isoc URBs are really completed. This function is
   scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
   are done. This function completes all URBs earlier marked with
   isoc_out_done by the fast interrupt routine check_finished_isoc_tx_epids().
   `data` is the crisv10_isoc_complete_data allocated by the interrupt
   handler; it is freed here after use. */

static void complete_isoc_bottom_half(void *data) {
  struct crisv10_isoc_complete_data *comp_data;
  struct usb_iso_packet_descriptor *packet;
  struct crisv10_urb_priv * urb_priv;
  unsigned long flags;
  struct urb* urb;
  int epid_done;
  int epid;
  int i;

  comp_data = (struct crisv10_isoc_complete_data*)data;

  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
    if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
      /* Only check valid Out Isoc epids */
      continue;
    }

    isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
             (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));

    /* The descriptor interrupt handler has marked all transmitted Out Isoc
       URBs with isoc_out_done. Now we traverse all epids and for all that
       have out Isoc traffic we traverse its URB list and complete the
       transmitted URBs. */
    epid_done = 0;
    while (!epid_done) {

      /* Get the active urb (if any) */
      urb = activeUrbList[epid];
      if (urb == 0) {
        isoc_dbg("No active URB on epid:%d anymore\n", epid);
        epid_done = 1;
        continue;
      }

      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
      ASSERT(usb_pipeout(urb->pipe));

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      if (!(urb_priv->isoc_out_done)) {
        /* We have reached URB that isn't flagged done yet, stop traversing. */
        isoc_dbg("Stoped traversing Out Isoc URBs on epid:%d"
                 " before not yet flaged URB:0x%x[%d]\n",
                 epid, (unsigned int)urb, urb_priv->urb_num);
        epid_done = 1;
        continue;
      }

      /* This urb has been sent. */
      isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
               (unsigned int)urb, urb_priv->urb_num);

      /* Set ok on transfered packets for this URB and finish it */
      for (i = 0; i < urb->number_of_packets; i++) {
        packet = &urb->iso_frame_desc[i];
        packet->status = 0;
        packet->actual_length = packet->length;
      }
      urb_priv->isoc_packet_counter = urb->number_of_packets;
      /* tc_finish_urb advances activeUrbList[epid], so the while loop sees
         the next queued URB on the following iteration. */
      tc_finish_urb(comp_data->hcd, urb, 0);

    } /* END: while(!epid_done) */
  } /* END: for(epid...) */

  local_irq_restore(flags);
  kmem_cache_free(isoc_compl_cache, comp_data);
}
  11067. +#endif
  11068. +
/* Complete finished Out Interrupt URBs.  For every in-use Out Intr epid,
   walk the EP descriptors between the first and second sof-EP of the
   TxIntrEPList ring; when the URB's first pool EP is found it is disabled
   (so it is not processed again) and the active URB is completed with
   status OK.  Runs with local IRQs disabled to protect TxIntrEPList. */
static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
  unsigned long flags;
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv * urb_priv;
  volatile struct USB_EP_Desc *curr_ep;   /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep;   /* The EP after current. */

  /* Protect TxintrEPList */
  local_irq_save(flags);

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
      /* Nothing to see on this epid. Only check valid Out Intr epids */
      continue;
    }

    urb = activeUrbList[epid];
    if(urb == 0) {
      intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
      continue;
    }

    /* Sanity check. */
    ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
    ASSERT(usb_pipeout(urb->pipe));

    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
    ASSERT(urb_priv);

    /* Go through EPs between first and second sof-EP. It's here Out Intr EPs
       are inserted.*/
    curr_ep = &TxIntrEPList[0];
    do {
      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
      if(next_ep == urb_priv->intr_ep_pool[0]) {
        /* We found the Out Intr EP for this epid */

        /* Disable it so it doesn't get processed again */
        next_ep->command &= ~IO_MASK(USB_EP_command, enable);

        /* Finish the active Out Intr URB with status OK */
        tc_finish_urb(hcd, urb, 0);
      }
      curr_ep = phys_to_virt(curr_ep->next);
    } while (curr_ep != &TxIntrEPList[1]);

  }
  local_irq_restore(flags);
}
  11119. +
/* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr).
   Each sub-channel's pending descriptor interrupt is acknowledged and then
   dispatched: sub0 restarts the bulk channel, sub1 handles finished Ctrl
   EPs, sub2 finished Intr EPs, sub3 (compiled out) Out Isoc completion. */
static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
  ASSERT(hcd);

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
    /* Clear this interrupt */
    *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
    restart_dma8_sub0();
  }

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
    /* Clear this interrupt */
    *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
    check_finished_ctrl_tx_epids(hcd);
  }

  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
    /* Clear this interrupt */
    *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
    check_finished_intr_tx_epids(hcd);
  }

  /* hinko ignore usb_pipeisoc */
#if 0
  if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
    struct crisv10_isoc_complete_data* comp_data;

    /* Flag done Out Isoc for later completion */
    check_finished_isoc_tx_epids();

    /* Clear this interrupt */
    *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
    /* Schedule bottom half of Out Isoc completion function. This function
       finishes the URBs marked with isoc_out_done */
    comp_data = (struct crisv10_isoc_complete_data*)
      kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
    ASSERT(comp_data != NULL);
    comp_data ->hcd = hcd;

    //INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half, comp_data);
    INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half);
    schedule_work(&comp_data->usb_bh);
  }
#endif

  return IRQ_HANDLED;
}
  11168. +
/* Interrupt handler for DMA9/IRQ25 (called from hardware intr).
   Drains the Rx descriptor ring: for each descriptor with eop set, copies
   received data into the active URB for the epid, completes URBs on eot
   (Bulk/Ctrl/Intr) or when all isoc packets have arrived, then recycles the
   descriptor (status cleared, eol moved forward) and restarts the channel.
   Runs with local IRQs disabled because driver completion callbacks are
   invoked from here. */
static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
  unsigned long flags;
  struct urb *urb;
  struct usb_hcd *hcd = (struct usb_hcd*)vhc;
  struct crisv10_urb_priv *urb_priv;
  int epid = 0;
  int real_error;

  ASSERT(hcd);

  /* Clear this interrupt. */
  *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);

  /* Custom clear interrupt for this interrupt */
  /* The reason we cli here is that we call the driver's callback functions. */
  local_irq_save(flags);

  /* Note that this while loop assumes that all packets span only
     one rx descriptor. */
  while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
    epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
    /* Get the active URB for this epid */
    urb = activeUrbList[epid];

    ASSERT(epid_inuse(epid));
    if (!urb) {
      dma_err("No urb for epid %d in rx interrupt\n", epid);
      goto skip_out;
    }

    /* Check if any errors on epid */
    real_error = 0;
    if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
      __u32 r_usb_ept_data;

      if (usb_pipeisoc(urb->pipe)) {
        r_usb_ept_data = etrax_epid_iso_get(epid);
        if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
           (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
           (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
          /* Not an error, just a failure to receive an expected iso
             in packet in this frame. This is not documented
             in the designers reference. Continue processing.
          */
        } else real_error = 1;
      } else real_error = 1;
    }

    if(real_error) {
      dma_err("Error in RX descr on epid:%d for URB 0x%x",
              epid, (unsigned int)urb);
      dump_ept_data(epid);
      dump_in_desc(myNextRxDesc);
      goto skip_out;
    }

    urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
    ASSERT(urb_priv);
    ASSERT(urb_priv->urb_state == STARTED ||
           urb_priv->urb_state == UNLINK);

    if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
        (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
        (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {

      /* We get nodata for empty data transactions, and the rx descriptor's
         hw_len field is not valid in that case. No data to copy in other
         words. */
      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
        /* No data to copy */
      } else {
        /*
        dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
                (unsigned int)urb, epid, myNextRxDesc->hw_len,
                urb_priv->rx_offset);
        */
        /* Only copy data if URB isn't flaged to be unlinked*/
        if(urb_priv->urb_state != UNLINK) {
          /* Make sure the data fits in the buffer. */
          if(urb_priv->rx_offset + myNextRxDesc->hw_len
             <= urb->transfer_buffer_length) {

            /* Copy the data to URBs buffer */
            memcpy(urb->transfer_buffer + urb_priv->rx_offset,
                   phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
            urb_priv->rx_offset += myNextRxDesc->hw_len;
          } else {
            /* Signal overflow when returning URB */
            urb->status = -EOVERFLOW;
            tc_finish_urb_later(hcd, urb, urb->status);
          }
        }
      }

      /* Check if it was the last packet in the transfer */
      if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
        /* Special handling for In Ctrl URBs. */
        if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
           !(urb_priv->ctrl_zout_done)) {
          /* Flag that RX part of Ctrl transfer is done. Because zout descr
             interrupt hasn't happend yet will the URB be finished in the
             TX-Interrupt. */
          urb_priv->ctrl_rx_done = 1;
          tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
                 " for zout\n", (unsigned int)urb);
        } else {
          tc_finish_urb(hcd, urb, 0);
        }
      }
    } else { /* ISOC RX */
      /*
      isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
               epid, (unsigned int)urb);
      */

      struct usb_iso_packet_descriptor *packet;

      if (urb_priv->urb_state == UNLINK) {
        isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
        goto skip_out;
      } else if (urb_priv->urb_state == NOT_STARTED) {
        isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
        goto skip_out;
      }

      packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
      ASSERT(packet);
      packet->status = 0;

      if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
        /* We get nodata for empty data transactions, and the rx descriptor's
           hw_len field is not valid in that case. We copy 0 bytes however to
           stay in synch. */
        packet->actual_length = 0;
      } else {
        packet->actual_length = myNextRxDesc->hw_len;
        /* Make sure the data fits in the buffer. */
        ASSERT(packet->actual_length <= packet->length);
        memcpy(urb->transfer_buffer + packet->offset,
               phys_to_virt(myNextRxDesc->buf), packet->actual_length);
        if(packet->actual_length > 0)
          isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
                   packet->actual_length, urb_priv->isoc_packet_counter,
                   (unsigned int)urb, urb_priv->urb_num);
      }

      /* Increment the packet counter. */
      urb_priv->isoc_packet_counter++;

      /* Note that we don't care about the eot field in the rx descriptor's
         status. It will always be set for isoc traffic. */
      if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
        /* Complete the urb with status OK. */
        tc_finish_urb(hcd, urb, 0);
      }
    }

  skip_out:
    /* Recycle this descriptor: clear its status, make it the new end of
       list, take eol off the previous tail, then restart the channel.
       The order matters so the DMA never sees a ring without an eol. */
    myNextRxDesc->status = 0;
    myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
    myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
    myLastRxDesc = myNextRxDesc;
    myNextRxDesc = phys_to_virt(myNextRxDesc->next);
    flush_etrax_cache();
    *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
  }

  local_irq_restore(flags);

  return IRQ_HANDLED;
}
  11341. +
  11342. +static void tc_bulk_start_timer_func(unsigned long dummy) {
  11343. + /* We might enable an EP descriptor behind the current DMA position when
  11344. + it's about to decide that there are no more bulk traffic and it should
  11345. + stop the bulk channel.
  11346. + Therefore we periodically check if the bulk channel is stopped and there
  11347. + is an enabled bulk EP descriptor, in which case we start the bulk
  11348. + channel. */
  11349. +
  11350. + if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
  11351. + int epid;
  11352. +
  11353. + timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
  11354. +
  11355. + for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
  11356. + if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
  11357. + timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
  11358. + epid);
  11359. + restart_dma8_sub0();
  11360. +
  11361. + /* Restart the bulk eot timer since we just started the bulk channel.*/
  11362. + mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
  11363. +
  11364. + /* No need to search any further. */
  11365. + break;
  11366. + }
  11367. + }
  11368. + } else {
  11369. + timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
  11370. + }
  11371. +}
  11372. +
  11373. +static void tc_bulk_eot_timer_func(unsigned long dummy) {
  11374. + struct usb_hcd *hcd = (struct usb_hcd*)dummy;
  11375. + ASSERT(hcd);
  11376. + /* Because of a race condition in the top half, we might miss a bulk eot.
  11377. + This timer "simulates" a bulk eot if we don't get one for a while,
  11378. + hopefully correcting the situation. */
  11379. + timer_dbg("bulk_eot_timer timed out.\n");
  11380. + check_finished_bulk_tx_epids(hcd, 1);
  11381. +}
  11382. +
  11383. +
  11384. +/*************************************************************/
  11385. +/*************************************************************/
  11386. +/* Device driver block */
  11387. +/*************************************************************/
  11388. +/*************************************************************/
  11389. +
  11390. +/* Forward declarations for device driver functions */
  11391. +static int devdrv_hcd_probe(struct device *);
  11392. +static int devdrv_hcd_remove(struct device *);
  11393. +#ifdef CONFIG_PM
  11394. +static int devdrv_hcd_suspend(struct device *, u32, u32);
  11395. +static int devdrv_hcd_resume(struct device *, u32);
  11396. +#endif /* CONFIG_PM */
  11397. +
  11398. +/* the device */
  11399. +static struct platform_device *devdrv_hc_platform_device;
  11400. +
  11401. +/* device driver interface */
  11402. +static struct device_driver devdrv_hc_device_driver = {
  11403. + .name = (char *) hc_name,
  11404. + .bus = &platform_bus_type,
  11405. +
  11406. + .probe = devdrv_hcd_probe,
  11407. + .remove = devdrv_hcd_remove,
  11408. +
  11409. +#ifdef CONFIG_PM
  11410. + .suspend = devdrv_hcd_suspend,
  11411. + .resume = devdrv_hcd_resume,
  11412. +#endif /* CONFIG_PM */
  11413. +};
  11414. +
  11415. +/* initialize the host controller and driver */
  11416. +static int __init_or_module devdrv_hcd_probe(struct device *dev)
  11417. +{
  11418. + struct usb_hcd *hcd;
  11419. + struct crisv10_hcd *crisv10_hcd;
  11420. + int retval;
  11421. + int rev_maj, rev_min;
  11422. +
  11423. + /* Check DMA burst length */
  11424. + if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
  11425. + IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
  11426. + devdrv_err("Invalid DMA burst length in Etrax 100LX,"
  11427. + " needs to be 32\n");
  11428. + return -EPERM;
  11429. + }
  11430. +
  11431. + hcd = usb_create_hcd(&crisv10_hc_driver, dev, dev_name(dev));
  11432. + if (!hcd)
  11433. + return -ENOMEM;
  11434. +
  11435. + crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  11436. + spin_lock_init(&crisv10_hcd->lock);
  11437. + crisv10_hcd->num_ports = num_ports();
  11438. + crisv10_hcd->running = 0;
  11439. +
  11440. + dev_set_drvdata(dev, crisv10_hcd);
  11441. +
  11442. + devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
  11443. + ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
  11444. +
  11445. + /* Print out chip version read from registers */
  11446. + rev_maj = *R_USB_REVISION & IO_MASK(R_USB_REVISION, major);
  11447. + rev_min = *R_USB_REVISION & IO_MASK(R_USB_REVISION, minor);
  11448. + if(rev_min == 0) {
  11449. + devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
  11450. + } else {
  11451. + devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
  11452. + }
  11453. +
  11454. + devdrv_info("Bulk timer interval, start:%d eot:%d\n",
  11455. + BULK_START_TIMER_INTERVAL,
  11456. + BULK_EOT_TIMER_INTERVAL);
  11457. +
  11458. +
  11459. + /* Init root hub data structures */
  11460. + if(rh_init()) {
  11461. + devdrv_err("Failed init data for Root Hub\n");
  11462. + retval = -ENOMEM;
  11463. + }
  11464. +
  11465. + if(port_in_use(0)) {
  11466. + if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
  11467. + printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
  11468. + retval = -EBUSY;
  11469. + goto out;
  11470. + }
  11471. + devdrv_info("Claimed interface for USB physical port 1\n");
  11472. + }
  11473. + if(port_in_use(1)) {
  11474. + if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
  11475. + /* Free first interface if second failed to be claimed */
  11476. + if(port_in_use(0)) {
  11477. + cris_free_io_interface(if_usb_1);
  11478. + }
  11479. + printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
  11480. + retval = -EBUSY;
  11481. + goto out;
  11482. + }
  11483. + devdrv_info("Claimed interface for USB physical port 2\n");
  11484. + }
  11485. +
  11486. + /* Init transfer controller structs and locks */
  11487. + if((retval = tc_init(hcd)) != 0) {
  11488. + goto out;
  11489. + }
  11490. +
  11491. + /* Attach interrupt functions for DMA and init DMA controller */
  11492. + if((retval = tc_dma_init(hcd)) != 0) {
  11493. + goto out;
  11494. + }
  11495. +
  11496. + /* Attach the top IRQ handler for USB controller interrupts */
  11497. + if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
  11498. + "ETRAX 100LX built-in USB (HC)", hcd)) {
  11499. + err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
  11500. + retval = -EBUSY;
  11501. + goto out;
  11502. + }
  11503. +
  11504. + /* iso_eof is only enabled when isoc traffic is running. */
  11505. + *R_USB_IRQ_MASK_SET =
  11506. + /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
  11507. + IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
  11508. + IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
  11509. + IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
  11510. + IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
  11511. +
  11512. +
  11513. + crisv10_ready_wait();
  11514. + /* Reset the USB interface. */
  11515. + *R_USB_COMMAND =
  11516. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  11517. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  11518. + IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  11519. +
  11520. + /* Designer's Reference, p. 8 - 10 says we should Initiate R_USB_FM_PSTART to
  11521. + 0x2A30 (10800), to guarantee that control traffic gets 10% of the
  11522. + bandwidth, and periodic transfer may allocate the rest (90%).
  11523. + This doesn't work though.
  11524. + The value 11960 is chosen to be just after the SOF token, with a couple
  11525. + of bit times extra for possible bit stuffing. */
  11526. + *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
  11527. +
  11528. + crisv10_ready_wait();
  11529. + /* Configure the USB interface as a host controller. */
  11530. + *R_USB_COMMAND =
  11531. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  11532. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  11533. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
  11534. +
  11535. +
  11536. + /* Check so controller not busy before enabling ports */
  11537. + crisv10_ready_wait();
  11538. +
  11539. + /* Enable selected USB ports */
  11540. + if(port_in_use(0)) {
  11541. + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  11542. + } else {
  11543. + *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  11544. + }
  11545. + if(port_in_use(1)) {
  11546. + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
  11547. + } else {
  11548. + *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
  11549. + }
  11550. +
  11551. + crisv10_ready_wait();
  11552. + /* Start processing of USB traffic. */
  11553. + *R_USB_COMMAND =
  11554. + IO_STATE(R_USB_COMMAND, port_sel, nop) |
  11555. + IO_STATE(R_USB_COMMAND, port_cmd, reset) |
  11556. + IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
  11557. +
  11558. + /* Do not continue probing initialization before USB interface is done */
  11559. + crisv10_ready_wait();
  11560. +
  11561. + /* Register our Host Controller to USB Core
  11562. + * Finish the remaining parts of generic HCD initialization: allocate the
  11563. + * buffers of consistent memory, register the bus
  11564. + * and call the driver's reset() and start() routines. */
  11565. + retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
  11566. + if (retval != 0) {
  11567. + devdrv_err("Failed registering HCD driver\n");
  11568. + goto out;
  11569. + }
  11570. +
  11571. + return 0;
  11572. +
  11573. + out:
  11574. + devdrv_hcd_remove(dev);
  11575. + return retval;
  11576. +}
  11577. +
  11578. +
  11579. +/* cleanup after the host controller and driver */
  11580. +static int __init_or_module devdrv_hcd_remove(struct device *dev)
  11581. +{
  11582. + struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
  11583. + struct usb_hcd *hcd;
  11584. +
  11585. + if (!crisv10_hcd)
  11586. + return 0;
  11587. + hcd = crisv10_hcd_to_hcd(crisv10_hcd);
  11588. +
  11589. +
  11590. + /* Stop USB Controller in Etrax 100LX */
  11591. + crisv10_hcd_reset(hcd);
  11592. +
  11593. + usb_remove_hcd(hcd);
  11594. + devdrv_dbg("Removed HCD from USB Core\n");
  11595. +
  11596. + /* Free USB Controller IRQ */
  11597. + free_irq(ETRAX_USB_HC_IRQ, NULL);
  11598. +
  11599. + /* Free resources */
  11600. + tc_dma_destroy();
  11601. + tc_destroy();
  11602. +
  11603. +
  11604. + if(port_in_use(0)) {
  11605. + cris_free_io_interface(if_usb_1);
  11606. + }
  11607. + if(port_in_use(1)) {
  11608. + cris_free_io_interface(if_usb_2);
  11609. + }
  11610. +
  11611. + devdrv_dbg("Freed all claimed resources\n");
  11612. +
  11613. + return 0;
  11614. +}
  11615. +
  11616. +
  11617. +#ifdef CONFIG_PM
  11618. +
  11619. +static int devdrv_hcd_suspend(struct usb_hcd *hcd, u32 state, u32 level)
  11620. +{
  11621. + return 0; /* no-op for now */
  11622. +}
  11623. +
  11624. +static int devdrv_hcd_resume(struct usb_hcd *hcd, u32 level)
  11625. +{
  11626. + return 0; /* no-op for now */
  11627. +}
  11628. +
  11629. +#endif /* CONFIG_PM */
  11630. +
  11631. +
  11632. +
  11633. +/*************************************************************/
  11634. +/*************************************************************/
  11635. +/* Module block */
  11636. +/*************************************************************/
  11637. +/*************************************************************/
  11638. +
  11639. +/* register driver */
  11640. +static int __init module_hcd_init(void)
  11641. +{
  11642. +
  11643. + if (usb_disabled())
  11644. + return -ENODEV;
  11645. +
  11646. + /* Here we select enabled ports by following defines created from
  11647. + menuconfig */
  11648. +#ifndef CONFIG_ETRAX_USB_HOST_PORT1
  11649. + ports &= ~(1<<0);
  11650. +#endif
  11651. +#ifndef CONFIG_ETRAX_USB_HOST_PORT2
  11652. + ports &= ~(1<<1);
  11653. +#endif
  11654. +
  11655. + printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
  11656. +
  11657. + devdrv_hc_platform_device =
  11658. + platform_device_register_simple((char *) hc_name, 0, NULL, 0);
  11659. +
  11660. + if (IS_ERR(devdrv_hc_platform_device))
  11661. + return PTR_ERR(devdrv_hc_platform_device);
  11662. + return driver_register(&devdrv_hc_device_driver);
  11663. + /*
  11664. + * Note that we do not set the DMA mask for the device,
  11665. + * i.e. we pretend that we will use PIO, since no specific
  11666. + * allocation routines are needed for DMA buffers. This will
  11667. + * cause the HCD buffer allocation routines to fall back to
  11668. + * kmalloc().
  11669. + */
  11670. +}
  11671. +
  11672. +/* unregister driver */
  11673. +static void __exit module_hcd_exit(void)
  11674. +{
  11675. + driver_unregister(&devdrv_hc_device_driver);
  11676. +}
  11677. +
  11678. +
  11679. +/* Module hooks */
  11680. +module_init(module_hcd_init);
  11681. +module_exit(module_hcd_exit);
  11682. diff -Nur linux-2.6.32.orig/drivers/usb/host/hc-crisv10.h linux-2.6.32/drivers/usb/host/hc-crisv10.h
  11683. --- linux-2.6.32.orig/drivers/usb/host/hc-crisv10.h 1970-01-01 01:00:00.000000000 +0100
  11684. +++ linux-2.6.32/drivers/usb/host/hc-crisv10.h 2010-01-10 13:41:59.326309689 +0100
  11685. @@ -0,0 +1,331 @@
  11686. +#ifndef __LINUX_ETRAX_USB_H
  11687. +#define __LINUX_ETRAX_USB_H
  11688. +
  11689. +#include <linux/types.h>
  11690. +#include <linux/list.h>
  11691. +
  11692. +struct USB_IN_Desc {
  11693. + volatile __u16 sw_len;
  11694. + volatile __u16 command;
  11695. + volatile unsigned long next;
  11696. + volatile unsigned long buf;
  11697. + volatile __u16 hw_len;
  11698. + volatile __u16 status;
  11699. +};
  11700. +
  11701. +struct USB_SB_Desc {
  11702. + volatile __u16 sw_len;
  11703. + volatile __u16 command;
  11704. + volatile unsigned long next;
  11705. + volatile unsigned long buf;
  11706. +};
  11707. +
  11708. +struct USB_EP_Desc {
  11709. + volatile __u16 hw_len;
  11710. + volatile __u16 command;
  11711. + volatile unsigned long sub;
  11712. + volatile unsigned long next;
  11713. +};
  11714. +
  11715. +
  11716. +/* Root Hub port status struct */
  11717. +struct crisv10_rh {
  11718. + volatile __u16 wPortChange[2];
  11719. + volatile __u16 wPortStatusPrev[2];
  11720. +};
  11721. +
  11722. +/* HCD description */
  11723. +struct crisv10_hcd {
  11724. + spinlock_t lock;
  11725. + __u8 num_ports;
  11726. + __u8 running;
  11727. +};
  11728. +
  11729. +
  11730. +/* Endpoint HC private data description */
  11731. +struct crisv10_ep_priv {
  11732. + int epid;
  11733. +};
  11734. +
  11735. +/* Additional software state info for a USB Controller epid */
  11736. +struct etrax_epid {
  11737. + __u8 inuse; /* !0 = setup in Etrax and used for an endpoint */
  11738. + __u8 disabled; /* !0 = Temporarily disabled to avoid resubmission */
  11739. + __u8 type; /* Setup as: PIPE_BULK, PIPE_CONTROL ... */
  11740. + __u8 out_traffic; /* !0 = This epid is for out traffic */
  11741. +};
  11742. +
  11743. +/* Struct to hold information of scheduled later URB completion */
  11744. +struct urb_later_data {
  11745. +// struct work_struct ws;
  11746. + struct delayed_work ws;
  11747. + struct usb_hcd *hcd;
  11748. + struct urb *urb;
  11749. + int urb_num;
  11750. + int status;
  11751. +};
  11752. +
  11753. +
  11754. +typedef enum {
  11755. + STARTED,
  11756. + NOT_STARTED,
  11757. + UNLINK,
  11758. +} crisv10_urb_state_t;
  11759. +
  11760. +
  11761. +struct crisv10_urb_priv {
  11762. + /* Sequence number for this URB. Every new submitted URB gets this from
  11763. + an incrementing counter. Used when a URB is scheduled for later finish to
  11764. + be sure that the intended URB hasn't already been completed (device
  11765. + drivers has a tendency to reuse URBs once they are completed, causing us
  11766. + to not be able to single old ones out only based on the URB pointer.) */
  11767. + __u32 urb_num;
  11768. +
  11769. + /* The first_sb field is used for freeing all SB descriptors belonging
  11770. + to an urb. The corresponding ep descriptor's sub pointer cannot be
  11771. + used for this since the DMA advances the sub pointer as it processes
  11772. + the sb list. */
  11773. + struct USB_SB_Desc *first_sb;
  11774. +
  11775. + /* The last_sb field refers to the last SB descriptor that belongs to
  11776. + this urb. This is important to know so we can free the SB descriptors
  11777. + that ranges between first_sb and last_sb. */
  11778. + struct USB_SB_Desc *last_sb;
  11779. +
  11780. + /* The rx_offset field is used in ctrl and bulk traffic to keep track
  11781. + of the offset in the urb's transfer_buffer where incoming data should be
  11782. + copied to. */
  11783. + __u32 rx_offset;
  11784. +
  11785. + /* Counter used in isochronous transfers to keep track of the
  11786. + number of packets received/transmitted. */
  11787. + __u32 isoc_packet_counter;
  11788. +
  11789. + /* Flag that marks if this Isoc Out URB has finished it's transfer. Used
  11790. + because several URBs can be finished before list is processed */
  11791. + __u8 isoc_out_done;
  11792. +
  11793. + /* This field is used to pass information about the urb's current state
  11794. + between the various interrupt handlers (thus marked volatile). */
  11795. + volatile crisv10_urb_state_t urb_state;
  11796. +
  11797. + /* In Ctrl transfers consist of (at least) 3 packets: SETUP, IN and ZOUT.
  11798. + When DMA8 sub-channel 2 has processed the SB list for this sequence we
  11799. + get an interrupt. We also get an interrupt for In transfers and which
  11800. + one of these interrupts that comes first depends on data size and device.
  11801. + To be sure that we have got both interrupts before we complete the URB
  11802. + we have these two flags that show which part has completed.
  11803. + We can then check when we get one of the interrupts that if the other has
  11804. + occurred it's safe for us to complete the URB, otherwise we set appropriate
  11805. + flag and do the completion when we get the other interrupt. */
  11806. + volatile unsigned char ctrl_zout_done;
  11807. + volatile unsigned char ctrl_rx_done;
  11808. +
  11809. + /* Connection between the submitted urb and ETRAX epid number */
  11810. + __u8 epid;
  11811. +
  11812. + /* The rx_data_list field is used for periodic traffic, to hold
  11813. + received data for later processing in the complete_urb functions,
  11814. + where the data is copied to the urb's transfer_buffer. Basically, we
  11815. + use this intermediate storage because we don't know when it's safe to
  11816. + reuse the transfer_buffer (FIXME?). */
  11817. + struct list_head rx_data_list;
  11818. +
  11819. +
  11820. + /* The interval time rounded up to closest 2^N */
  11821. + int interval;
  11822. +
  11823. + /* Pool of EP descriptors needed if it's a INTR transfer.
  11824. + Amount of EPs in pool corresponds to how many INTR that should
  11825. + be inserted in TxIntrEPList (max 128, defined by MAX_INTR_INTERVAL) */
  11826. + struct USB_EP_Desc* intr_ep_pool[128];
  11827. +
  11828. + /* The amount of EPs allocated for this INTR URB */
  11829. + int intr_ep_pool_length;
  11830. +
  11831. + /* Pointer to info struct if URB is scheduled to be finished later */
  11832. + struct urb_later_data* later_data;
  11833. +};
  11834. +
  11835. +
  11836. +/* This struct is for passing data from the top half to the bottom half irq
  11837. + handlers */
  11838. +struct crisv10_irq_reg {
  11839. + struct usb_hcd* hcd;
  11840. + __u32 r_usb_epid_attn;
  11841. + __u8 r_usb_status;
  11842. + __u16 r_usb_rh_port_status_1;
  11843. + __u16 r_usb_rh_port_status_2;
  11844. + __u32 r_usb_irq_mask_read;
  11845. + __u32 r_usb_fm_number;
  11846. + struct work_struct usb_bh;
  11847. +};
  11848. +
  11849. +
  11850. +/* This struct is for passing data from the isoc top half to the isoc bottom
  11851. + half. */
  11852. +struct crisv10_isoc_complete_data {
  11853. + struct usb_hcd *hcd;
  11854. + struct urb *urb;
  11855. + struct work_struct usb_bh;
  11856. +};
  11857. +
  11858. +/* Entry item for URB lists for each endpoint */
  11859. +typedef struct urb_entry
  11860. +{
  11861. + struct urb *urb;
  11862. + struct list_head list;
  11863. +} urb_entry_t;
  11864. +
  11865. +/* ---------------------------------------------------------------------------
  11866. + Virtual Root HUB
  11867. + ------------------------------------------------------------------------- */
  11868. +/* destination of request */
  11869. +#define RH_INTERFACE 0x01
  11870. +#define RH_ENDPOINT 0x02
  11871. +#define RH_OTHER 0x03
  11872. +
  11873. +#define RH_CLASS 0x20
  11874. +#define RH_VENDOR 0x40
  11875. +
  11876. +/* Requests: bRequest << 8 | bmRequestType */
  11877. +#define RH_GET_STATUS 0x0080
  11878. +#define RH_CLEAR_FEATURE 0x0100
  11879. +#define RH_SET_FEATURE 0x0300
  11880. +#define RH_SET_ADDRESS 0x0500
  11881. +#define RH_GET_DESCRIPTOR 0x0680
  11882. +#define RH_SET_DESCRIPTOR 0x0700
  11883. +#define RH_GET_CONFIGURATION 0x0880
  11884. +#define RH_SET_CONFIGURATION 0x0900
  11885. +#define RH_GET_STATE 0x0280
  11886. +#define RH_GET_INTERFACE 0x0A80
  11887. +#define RH_SET_INTERFACE 0x0B00
  11888. +#define RH_SYNC_FRAME 0x0C80
  11889. +/* Our Vendor Specific Request */
  11890. +#define RH_SET_EP 0x2000
  11891. +
  11892. +
  11893. +/* Hub port features */
  11894. +#define RH_PORT_CONNECTION 0x00
  11895. +#define RH_PORT_ENABLE 0x01
  11896. +#define RH_PORT_SUSPEND 0x02
  11897. +#define RH_PORT_OVER_CURRENT 0x03
  11898. +#define RH_PORT_RESET 0x04
  11899. +#define RH_PORT_POWER 0x08
  11900. +#define RH_PORT_LOW_SPEED 0x09
  11901. +#define RH_C_PORT_CONNECTION 0x10
  11902. +#define RH_C_PORT_ENABLE 0x11
  11903. +#define RH_C_PORT_SUSPEND 0x12
  11904. +#define RH_C_PORT_OVER_CURRENT 0x13
  11905. +#define RH_C_PORT_RESET 0x14
  11906. +
  11907. +/* Hub features */
  11908. +#define RH_C_HUB_LOCAL_POWER 0x00
  11909. +#define RH_C_HUB_OVER_CURRENT 0x01
  11910. +
  11911. +#define RH_DEVICE_REMOTE_WAKEUP 0x00
  11912. +#define RH_ENDPOINT_STALL 0x01
  11913. +
  11914. +/* Our Vendor Specific feature */
  11915. +#define RH_REMOVE_EP 0x00
  11916. +
  11917. +
  11918. +#define RH_ACK 0x01
  11919. +#define RH_REQ_ERR -1
  11920. +#define RH_NACK 0x00
  11921. +
  11922. +/* Field definitions for */
  11923. +
  11924. +#define USB_IN_command__eol__BITNR 0 /* command macros */
  11925. +#define USB_IN_command__eol__WIDTH 1
  11926. +#define USB_IN_command__eol__no 0
  11927. +#define USB_IN_command__eol__yes 1
  11928. +
  11929. +#define USB_IN_command__intr__BITNR 3
  11930. +#define USB_IN_command__intr__WIDTH 1
  11931. +#define USB_IN_command__intr__no 0
  11932. +#define USB_IN_command__intr__yes 1
  11933. +
  11934. +#define USB_IN_status__eop__BITNR 1 /* status macros. */
  11935. +#define USB_IN_status__eop__WIDTH 1
  11936. +#define USB_IN_status__eop__no 0
  11937. +#define USB_IN_status__eop__yes 1
  11938. +
  11939. +#define USB_IN_status__eot__BITNR 5
  11940. +#define USB_IN_status__eot__WIDTH 1
  11941. +#define USB_IN_status__eot__no 0
  11942. +#define USB_IN_status__eot__yes 1
  11943. +
  11944. +#define USB_IN_status__error__BITNR 6
  11945. +#define USB_IN_status__error__WIDTH 1
  11946. +#define USB_IN_status__error__no 0
  11947. +#define USB_IN_status__error__yes 1
  11948. +
  11949. +#define USB_IN_status__nodata__BITNR 7
  11950. +#define USB_IN_status__nodata__WIDTH 1
  11951. +#define USB_IN_status__nodata__no 0
  11952. +#define USB_IN_status__nodata__yes 1
  11953. +
  11954. +#define USB_IN_status__epid__BITNR 8
  11955. +#define USB_IN_status__epid__WIDTH 5
  11956. +
  11957. +#define USB_EP_command__eol__BITNR 0
  11958. +#define USB_EP_command__eol__WIDTH 1
  11959. +#define USB_EP_command__eol__no 0
  11960. +#define USB_EP_command__eol__yes 1
  11961. +
  11962. +#define USB_EP_command__eof__BITNR 1
  11963. +#define USB_EP_command__eof__WIDTH 1
  11964. +#define USB_EP_command__eof__no 0
  11965. +#define USB_EP_command__eof__yes 1
  11966. +
  11967. +#define USB_EP_command__intr__BITNR 3
  11968. +#define USB_EP_command__intr__WIDTH 1
  11969. +#define USB_EP_command__intr__no 0
  11970. +#define USB_EP_command__intr__yes 1
  11971. +
  11972. +#define USB_EP_command__enable__BITNR 4
  11973. +#define USB_EP_command__enable__WIDTH 1
  11974. +#define USB_EP_command__enable__no 0
  11975. +#define USB_EP_command__enable__yes 1
  11976. +
  11977. +#define USB_EP_command__hw_valid__BITNR 5
  11978. +#define USB_EP_command__hw_valid__WIDTH 1
  11979. +#define USB_EP_command__hw_valid__no 0
  11980. +#define USB_EP_command__hw_valid__yes 1
  11981. +
  11982. +#define USB_EP_command__epid__BITNR 8
  11983. +#define USB_EP_command__epid__WIDTH 5
  11984. +
  11985. +#define USB_SB_command__eol__BITNR 0 /* command macros. */
  11986. +#define USB_SB_command__eol__WIDTH 1
  11987. +#define USB_SB_command__eol__no 0
  11988. +#define USB_SB_command__eol__yes 1
  11989. +
  11990. +#define USB_SB_command__eot__BITNR 1
  11991. +#define USB_SB_command__eot__WIDTH 1
  11992. +#define USB_SB_command__eot__no 0
  11993. +#define USB_SB_command__eot__yes 1
  11994. +
  11995. +#define USB_SB_command__intr__BITNR 3
  11996. +#define USB_SB_command__intr__WIDTH 1
  11997. +#define USB_SB_command__intr__no 0
  11998. +#define USB_SB_command__intr__yes 1
  11999. +
  12000. +#define USB_SB_command__tt__BITNR 4
  12001. +#define USB_SB_command__tt__WIDTH 2
  12002. +#define USB_SB_command__tt__zout 0
  12003. +#define USB_SB_command__tt__in 1
  12004. +#define USB_SB_command__tt__out 2
  12005. +#define USB_SB_command__tt__setup 3
  12006. +
  12007. +
  12008. +#define USB_SB_command__rem__BITNR 8
  12009. +#define USB_SB_command__rem__WIDTH 6
  12010. +
  12011. +#define USB_SB_command__full__BITNR 6
  12012. +#define USB_SB_command__full__WIDTH 1
  12013. +#define USB_SB_command__full__no 0
  12014. +#define USB_SB_command__full__yes 1
  12015. +
  12016. +#endif
  12017. diff -Nur linux-2.6.32.orig/drivers/usb/host/Makefile linux-2.6.32/drivers/usb/host/Makefile
  12018. --- linux-2.6.32.orig/drivers/usb/host/Makefile 2009-12-03 04:51:21.000000000 +0100
  12019. +++ linux-2.6.32/drivers/usb/host/Makefile 2010-01-10 13:41:59.326309689 +0100
  12020. @@ -32,3 +32,4 @@
  12021. obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
  12022. obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
  12023. obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
  12024. +obj-$(CONFIG_ETRAX_USB_HOST) += hc-crisv10.o
  12025. diff -Nur linux-2.6.32.orig/drivers/usb/host/Makefile.orig linux-2.6.32/drivers/usb/host/Makefile.orig
  12026. --- linux-2.6.32.orig/drivers/usb/host/Makefile.orig 1970-01-01 01:00:00.000000000 +0100
  12027. +++ linux-2.6.32/drivers/usb/host/Makefile.orig 2009-12-03 04:51:21.000000000 +0100
  12028. @@ -0,0 +1,34 @@
  12029. +#
  12030. +# Makefile for USB Host Controller Drivers
  12031. +#
  12032. +
  12033. +ifeq ($(CONFIG_USB_DEBUG),y)
  12034. + EXTRA_CFLAGS += -DDEBUG
  12035. +endif
  12036. +
  12037. +isp1760-objs := isp1760-hcd.o isp1760-if.o
  12038. +fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
  12039. + fhci-tds.o fhci-sched.o
  12040. +ifeq ($(CONFIG_FHCI_DEBUG),y)
  12041. +fhci-objs += fhci-dbg.o
  12042. +endif
  12043. +xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
  12044. +
  12045. +obj-$(CONFIG_USB_WHCI_HCD) += whci/
  12046. +
  12047. +obj-$(CONFIG_PCI) += pci-quirks.o
  12048. +
  12049. +obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
  12050. +obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
  12051. +obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
  12052. +obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
  12053. +obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
  12054. +obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
  12055. +obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
  12056. +obj-$(CONFIG_USB_XHCI_HCD) += xhci.o
  12057. +obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
  12058. +obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
  12059. +obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
  12060. +obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
  12061. +obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
  12062. +obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
  12063. diff -Nur linux-2.6.32.orig/drivers/usb/Makefile linux-2.6.32/drivers/usb/Makefile
  12064. --- linux-2.6.32.orig/drivers/usb/Makefile 2009-12-03 04:51:21.000000000 +0100
  12065. +++ linux-2.6.32/drivers/usb/Makefile 2010-01-10 13:41:59.326309689 +0100
  12066. @@ -21,6 +21,7 @@
  12067. obj-$(CONFIG_USB_R8A66597_HCD) += host/
  12068. obj-$(CONFIG_USB_HWA_HCD) += host/
  12069. obj-$(CONFIG_USB_ISP1760_HCD) += host/
  12070. +obj-$(CONFIG_ETRAX_USB_HOST) += host/
  12071. obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
  12072. diff -Nur linux-2.6.32.orig/drivers/usb/Makefile.orig linux-2.6.32/drivers/usb/Makefile.orig
  12073. --- linux-2.6.32.orig/drivers/usb/Makefile.orig 1970-01-01 01:00:00.000000000 +0100
  12074. +++ linux-2.6.32/drivers/usb/Makefile.orig 2009-12-03 04:51:21.000000000 +0100
  12075. @@ -0,0 +1,46 @@
  12076. +#
  12077. +# Makefile for the kernel USB device drivers.
  12078. +#
  12079. +
  12080. +# Object files in subdirectories
  12081. +
  12082. +obj-$(CONFIG_USB) += core/
  12083. +
  12084. +obj-$(CONFIG_USB_MON) += mon/
  12085. +
  12086. +obj-$(CONFIG_PCI) += host/
  12087. +obj-$(CONFIG_USB_EHCI_HCD) += host/
  12088. +obj-$(CONFIG_USB_ISP116X_HCD) += host/
  12089. +obj-$(CONFIG_USB_OHCI_HCD) += host/
  12090. +obj-$(CONFIG_USB_UHCI_HCD) += host/
  12091. +obj-$(CONFIG_USB_FHCI_HCD) += host/
  12092. +obj-$(CONFIG_USB_XHCI_HCD) += host/
  12093. +obj-$(CONFIG_USB_SL811_HCD) += host/
  12094. +obj-$(CONFIG_USB_ISP1362_HCD) += host/
  12095. +obj-$(CONFIG_USB_U132_HCD) += host/
  12096. +obj-$(CONFIG_USB_R8A66597_HCD) += host/
  12097. +obj-$(CONFIG_USB_HWA_HCD) += host/
  12098. +obj-$(CONFIG_USB_ISP1760_HCD) += host/
  12099. +
  12100. +obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
  12101. +
  12102. +obj-$(CONFIG_USB_WUSB) += wusbcore/
  12103. +
  12104. +obj-$(CONFIG_USB_ACM) += class/
  12105. +obj-$(CONFIG_USB_PRINTER) += class/
  12106. +obj-$(CONFIG_USB_WDM) += class/
  12107. +obj-$(CONFIG_USB_TMC) += class/
  12108. +
  12109. +obj-$(CONFIG_USB_STORAGE) += storage/
  12110. +obj-$(CONFIG_USB) += storage/
  12111. +
  12112. +obj-$(CONFIG_USB_MDC800) += image/
  12113. +obj-$(CONFIG_USB_MICROTEK) += image/
  12114. +
  12115. +obj-$(CONFIG_USB_SERIAL) += serial/
  12116. +
  12117. +obj-$(CONFIG_USB) += misc/
  12118. +obj-y += early/
  12119. +
  12120. +obj-$(CONFIG_USB_ATM) += atm/
  12121. +obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
  12122. diff -Nur linux-2.6.32.orig/lib/klist.c linux-2.6.32/lib/klist.c
  12123. --- linux-2.6.32.orig/lib/klist.c 2009-12-03 04:51:21.000000000 +0100
  12124. +++ linux-2.6.32/lib/klist.c 2010-01-10 13:41:59.326309689 +0100
  12125. @@ -60,7 +60,7 @@
  12126. {
  12127. knode->n_klist = klist;
  12128. /* no knode deserves to start its life dead */
  12129. - WARN_ON(knode_dead(knode));
  12130. + //WARN_ON(knode_dead(knode));
  12131. }
  12132. static void knode_kill(struct klist_node *knode)